id
int64 0
755k
| file_name
stringlengths 3
109
| file_path
stringlengths 13
185
| content
stringlengths 31
9.38M
| size
int64 31
9.38M
| language
stringclasses 1
value | extension
stringclasses 11
values | total_lines
int64 1
340k
| avg_line_length
float64 2.18
149k
| max_line_length
int64 7
2.22M
| alphanum_fraction
float64 0
1
| repo_name
stringlengths 6
65
| repo_stars
int64 100
47.3k
| repo_forks
int64 0
12k
| repo_open_issues
int64 0
3.4k
| repo_license
stringclasses 9
values | repo_extraction_date
stringclasses 92
values | exact_duplicates_redpajama
bool 2
classes | near_duplicates_redpajama
bool 2
classes | exact_duplicates_githubcode
bool 2
classes | exact_duplicates_stackv2
bool 1
class | exact_duplicates_stackv1
bool 2
classes | near_duplicates_githubcode
bool 2
classes | near_duplicates_stackv1
bool 2
classes | near_duplicates_stackv2
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
16,921
|
queuecreator.cpp
|
manticoresoftware_manticoresearch/src/queuecreator.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "queuecreator.h"
#include "std/openhash.h"
#include "schema/rset.h"
#include "columnargrouper.h"
#include "columnarsort.h"
#include "exprgeodist.h"
#include "exprremap.h"
#include "exprdocstore.h"
#include "sphinxjson.h"
#include "joinsorter.h"
#include "columnarexpr.h"
#include "sphinxfilter.h"
#include "queryprofile.h"
#include "knnmisc.h"
static const char g_sIntAttrPrefix[] = "@int_attr_";
static const char g_sIntJsonPrefix[] = "@groupbystr_";
bool HasImplicitGrouping ( const CSphQuery & tQuery )
{
auto fnIsImplicit = [] ( const CSphQueryItem & t )
{
return ( t.m_eAggrFunc!=SPH_AGGR_NONE ) || t.m_sExpr=="count(*)" || t.m_sExpr=="@distinct";
};
return tQuery.m_sGroupBy.IsEmpty() ? tQuery.m_dItems.any_of(fnIsImplicit) : false;
}
// returns true if any select-list item needs expression evaluation,
// i.e. is not a star, a plain unaliased attribute, or groupby magic
bool sphHasExpressions ( const CSphQuery & tQuery, const CSphSchema & tSchema )
{
	return tQuery.m_dItems.any_of ( [&tSchema] ( const CSphQueryItem & tItem )
	{
		const CSphString & sExpr = tItem.m_sExpr;

		// all expressions that come from parser are automatically aliased
		assert ( !tItem.m_sAlias.IsEmpty() );

		bool bPlain = sExpr=="*"
			|| ( tSchema.GetAttrIndex ( sExpr.cstr() )>=0 && tItem.m_eAggrFunc==SPH_AGGR_NONE && tItem.m_sAlias==sExpr )
			|| IsGroupbyMagic(sExpr);

		return !bPlain;
	});
}
// resolves an attribute index by name, also trying select-list aliases
// (facet queries may reference either the expression or its alias)
int GetAliasedAttrIndex ( const CSphString & sAttr, const CSphQuery & tQuery, const ISphSchema & tSchema )
{
	int iAttr = tSchema.GetAttrIndex ( sAttr.cstr() );
	if ( iAttr>=0 )
		return iAttr;

	// try aliased groupby attr (facets): map expr->alias or alias->expr
	for ( const auto & tItem : tQuery.m_dItems )
	{
		if ( sAttr==tItem.m_sExpr )
			return tSchema.GetAttrIndex ( tItem.m_sAlias.cstr() );

		if ( sAttr==tItem.m_sAlias )
			return tSchema.GetAttrIndex ( tItem.m_sExpr.cstr() );
	}

	return iAttr;
}
// matches both the internal @count attr and the user-facing count(*) spelling
static bool IsCount ( const CSphString & s )
{
	if ( s=="@count" )
		return true;

	return s=="count(*)";
}
// matches magic groupby names plus the internal @groupbystr_ prefix
static bool IsGroupby ( const CSphString & s )
{
	if ( s=="@groupby" || s=="@distinct" || s=="groupby()" )
		return true;

	return IsSortJsonInternal(s);
}
// true for any magic select-list name related to grouping or counting
bool IsGroupbyMagic ( const CSphString & s )
{
	return IsCount(s) || IsGroupby(s);
}
// picks the narrowest attr type able to hold one null bit per attribute:
// 32-bit int, 64-bit int, or an arbitrary-length string blob beyond that
ESphAttr DetermineNullMaskType ( int iNumAttrs )
{
	if ( iNumAttrs>64 )
		return SPH_ATTR_STRINGPTR;

	return iNumAttrs>32 ? SPH_ATTR_BIGINT : SPH_ATTR_INTEGER;
}
// returns the prefix used for internal sort-by-string attrs ("@int_attr_")
const char * GetInternalAttrPrefix()
{
	return g_sIntAttrPrefix;
}
// returns the prefix used for internal groupby-string attrs ("@groupbystr_")
const char * GetInternalJsonPrefix()
{
	return g_sIntJsonPrefix;
}
// true when the column name carries the internal "@int_attr_" prefix
bool IsSortStringInternal ( const CSphString & sColumnName )
{
	assert ( sColumnName.cstr() );
	const auto iPrefixLen = sizeof(g_sIntAttrPrefix)-1; // skip the trailing NUL
	return strncmp ( sColumnName.cstr(), g_sIntAttrPrefix, iPrefixLen )==0;
}
// true when the column name carries the internal "@groupbystr_" prefix
bool IsSortJsonInternal ( const CSphString & sColumnName )
{
	assert ( sColumnName.cstr() );
	const auto iPrefixLen = sizeof(g_sIntJsonPrefix)-1; // skip the trailing NUL
	return strncmp ( sColumnName.cstr(), g_sIntJsonPrefix, iPrefixLen )==0;
}
// builds the internal "@groupbystr_<name>" attr name; empty input yields empty output
CSphString SortJsonInternalSet ( const CSphString & sColumnName )
{
	CSphString sName;
	if ( sColumnName.IsEmpty() )
		return sName;

	StringBuilder_c tBuilder;
	tBuilder << g_sIntJsonPrefix << sColumnName;
	tBuilder.MoveTo(sName);
	return sName;
}
///////////////////////////////////////////////////////////////////////////////
// checks whether the expression references the joined index ("<index2>.")
// and that the match is a standalone reference, not part of a longer identifier
static bool ExprHasJoinPrefix ( const CSphString & sExpr, const JoinArgs_t * pArgs )
{
	if ( !pArgs )
		return false;

	CSphString sPrefix;
	sPrefix.SetSprintf ( "%s.", pArgs->m_sIndex2.cstr() );

	const char * szMatch = strstr ( sExpr.cstr(), sPrefix.cstr() );
	if ( !szMatch )
		return false;

	// a match at the very start can't be embedded in another identifier
	if ( szMatch==sExpr.cstr() )
		return true;

	// reject e.g. "myjoined.attr" matching the "joined." prefix
	char cPrev = szMatch[-1];
	bool bIdentChar = ( cPrev>='0' && cPrev<='9' ) || ( cPrev>='a' && cPrev<='z' ) || ( cPrev>='A' && cPrev<='Z' ) || cPrev=='_';
	return !bIdentChar;
}
// matches both the internal KNN distance attr name and the knn_dist() spelling
static bool IsKnnDist ( const CSphString & sExpr )
{
	if ( sExpr=="knn_dist()" )
		return true;

	return sExpr==GetKnnDistAttrName();
}
// maps an attribute type to the sort-key-part kind used by the comparators;
// anything not explicitly listed compares as an integer
static inline ESphSortKeyPart Attr2Keypart ( ESphAttr eType )
{
	if ( eType==SPH_ATTR_FLOAT )
		return SPH_KEYPART_FLOAT;

	if ( eType==SPH_ATTR_DOUBLE )
		return SPH_KEYPART_DOUBLE;

	if ( eType==SPH_ATTR_STRING )
		return SPH_KEYPART_STRING;

	// JSON (plain and ptr) fields and ptr strings all compare as ptr strings
	if ( eType==SPH_ATTR_JSON || eType==SPH_ATTR_JSON_PTR || eType==SPH_ATTR_JSON_FIELD || eType==SPH_ATTR_JSON_FIELD_PTR || eType==SPH_ATTR_STRINGPTR )
		return SPH_KEYPART_STRINGPTR;

	return SPH_KEYPART_INT;
}
///////////////////////////////////////////////////////////////////////////////
// remaps grouping-related attribute locators from the old schema to the new one
// (used when the sorter schema is rebuilt and attr offsets change)
void CSphGroupSorterSettings::FixupLocators ( const ISphSchema * pOldSchema, const ISphSchema * pNewSchema )
{
	sphFixupLocator ( m_tLocGroupby, pOldSchema, pNewSchema );
	sphFixupLocator ( m_tLocCount, pOldSchema, pNewSchema );
	sphFixupLocator ( m_tLocDistinct, pOldSchema, pNewSchema );
	sphFixupLocator ( m_tLocGroupbyStr, pOldSchema, pNewSchema );

	// the distinct fetcher keeps its own locator state; remap it too
	if ( m_pDistinctFetcher )
		m_pDistinctFetcher->FixupLocators ( pOldSchema, pNewSchema );
}
// derives the approximate-distinct accuracy setting from a threshold;
// zero threshold means exact (non-approximate) distinct counting
void CSphGroupSorterSettings::SetupDistinctAccuracy ( int iThresh )
{
	if ( !iThresh )
	{
		m_iDistinctAccuracy = 0;
		return;
	}

	// translate the user threshold into a hash table capacity, accounting for its load factor
	iThresh = int ( float(iThresh) / OpenHashTable_T<int,int>::GetLoadFactor() ) + 1;
	// NOTE(review): after the +1, iThresh can only be zero for negative inputs; the ternary guards that case
	m_iDistinctAccuracy = iThresh ? sphLog2(iThresh) + 4 : 0;
	// clamp the accuracy into the [14..18] working range
	m_iDistinctAccuracy = Min ( m_iDistinctAccuracy, 18 );
	m_iDistinctAccuracy = Max ( m_iDistinctAccuracy, 14 );
}
///////////////////////////////////////////////////////////////////////////////
/// Builds the match queue (sorter) for a single query: parses the select list,
/// extends the sorter schema with expression/grouping attrs, configures
/// grouping, distinct and sorting state, and finally spawns an ISphMatchSorter.
class QueueCreator_c
{
public:
	bool m_bMulti = false;
	bool m_bCreate = true;
	bool m_bZonespanlist = false;	// set when a parsed expression reports zonespanlist usage
	DWORD m_uPackedFactorFlags = SPH_FACTOR_DISABLE;	// packed-factors flags collected from parsed expressions
	bool m_bJoinedGroupSort = false; // do we need joined attrs for sorting/grouping?

	QueueCreator_c ( const SphQueueSettings_t & tSettings, const CSphQuery & tQuery, CSphString & sError, StrVec_t * pExtra, QueryProfile_c * pProfile );

	bool SetupComputeQueue();
	bool SetupGroupQueue();
	bool SetupQueue();

	CSphRsetSchema & SorterSchema() const { return *m_pSorterSchema; }
	bool HasJson() const { return m_tGroupSorterSettings.m_bJson; }
	bool SetSchemaGroupQueue ( const CSphRsetSchema & tNewSchema );

	/// creates proper queue for given query
	/// may return NULL on error; in this case, error message is placed in sError
	/// if the pUpdate is given, creates the updater's queue and perform the index update
	/// instead of searching
	ISphMatchSorter * CreateQueue();

private:
	const SphQueueSettings_t & m_tSettings;	// external queue settings (index schema, max matches, join args etc.)
	const CSphQuery & m_tQuery;				// the query being set up
	CSphString & m_sError;					// destination for error messages
	StrVec_t * m_pExtra = nullptr;
	QueryProfile_c * m_pProfile = nullptr;
	bool m_bHasCount = false;				// select list mentions @count / count(*)
	bool m_bHasGroupByExpr = false;			// select list mentions groupby magic names
	sph::StringSet m_hQueryAttrs;
	std::unique_ptr<CSphRsetSchema> m_pSorterSchema;	// schema being extended for the sorter

	bool m_bGotGroupby;
	bool m_bRandomize;
	ESphSortFunc m_eMatchFunc = FUNC_REL_DESC;
	ESphSortFunc m_eGroupFunc = FUNC_REL_DESC;
	CSphMatchComparatorState m_tStateMatch;
	CSphVector<ExtraSortExpr_t> m_dMatchJsonExprs;		// one slot per match sort key (MAX_ATTRS)
	CSphMatchComparatorState m_tStateGroup;
	CSphVector<ExtraSortExpr_t> m_dGroupJsonExprs;		// one slot per group sort key (MAX_ATTRS)
	CSphGroupSorterSettings m_tGroupSorterSettings;
	CSphVector<std::pair<int,bool>> m_dGroupColumns;	// (attr index, grouper-uses-attrs) pairs
	StrVec_t m_dGroupJsonAttrs;
	bool m_bHeadWOGroup;
	bool m_bGotDistinct;
	bool m_bExprsNeedDocids = false;

	// for sorter to create pooled attributes
	bool m_bHaveStar = false;				// select list contains "*"

	// fixme! transform to StringSet on end of merge!
	sph::StringSet m_hQueryColumns; // FIXME!!! unify with Extra schema after merge master into branch
	sph::StringSet m_hQueryDups;
	sph::StringSet m_hExtra;

	// select-list parsing and schema extension
	bool ParseQueryItem ( const CSphQueryItem & tItem );
	bool MaybeAddGeodistColumn();
	bool MaybeAddExprColumn();
	bool MaybeAddExpressionsFromSelectList();
	bool AddExpressionsForUpdates();
	bool MaybeAddGroupbyMagic ( bool bGotDistinct );
	bool AddKNNDistColumn();

	// JOIN support
	bool AddJoinAttrs();
	bool CheckJoinOnTypeCast ( const CSphString & sIdx, const CSphString & sAttr, ESphAttr eTypeCast );
	bool AddJoinFilterAttrs();
	bool AddJsonJoinOnFilter ( const CSphString & sAttr1, const CSphString & sAttr2, ESphAttr eTypeCast );
	bool AddNullBitmask();
	bool AddColumnarJoinOnFilter ( const CSphString & sAttr );

	// grouping / sorting setup
	bool CheckHavingConstraints() const;
	bool SetupGroupbySettings ( bool bHasImplicitGrouping );
	void AssignOrderByToPresortStage ( const int * pAttrs, int iAttrCount );
	void AddAttrsFromSchema ( const ISphSchema & tSchema, const CSphString & sPrefix );
	void ModifyExprForJoin ( CSphColumnInfo & tExprCol, const CSphString & sExpr );
	void SelectExprEvalStage ( CSphColumnInfo & tExprCol );
	void ReplaceGroupbyStrWithExprs ( CSphMatchComparatorState & tState, int iNumOldAttrs );
	void ReplaceStaticStringsWithExprs ( CSphMatchComparatorState & tState );
	void ReplaceJsonWithExprs ( CSphMatchComparatorState & tState, CSphVector<ExtraSortExpr_t> & dExtraExprs );
	void AddColumnarExprsAsAttrs ( CSphMatchComparatorState & tState, CSphVector<ExtraSortExpr_t> & dExtraExprs );
	void RemapAttrs ( CSphMatchComparatorState & tState, CSphVector<ExtraSortExpr_t> & dExtraExprs );
	static void SetupRemapColJson ( CSphColumnInfo & tRemapCol, CSphMatchComparatorState & tState, CSphVector<ExtraSortExpr_t> & dExtraExprs, int iStateAttr ) ;
	const CSphColumnInfo * GetGroupbyStr ( int iAttr, int iNumOldAttrs ) const;
	bool SetupMatchesSortingFunc();
	bool SetupGroupSortingFunc ( bool bGotDistinct );
	bool AddGroupbyStuff();
	void AddKnnDistSort ( CSphString & sSortBy );
	bool ParseJoinExpr ( CSphColumnInfo & tExprCol, const CSphString & sAttr, const CSphString & sExpr ) const;
	bool SetGroupSorting();
	void ExtraAddSortkeys ( const int * dAttrs );
	bool AddStoredFieldExpressions();
	bool AddColumnarAttributeExpressions();
	void CreateGrouperByAttr ( ESphAttr eType, const CSphColumnInfo & tGroupByAttr, bool & bGrouperUsesAttrs );
	void SelectStageForColumnarExpr ( CSphColumnInfo & tExprCol );
	void FetchDependencyChains ( StrVec_t & dDependentCols );
	void PropagateEvalStage ( CSphColumnInfo & tExprCol, StrVec_t & dDependentCols );
	bool SetupDistinctAttr();
	bool PredictAggregates() const;
	bool ReplaceWithColumnarItem ( const CSphString & sAttr, ESphEvalStage eStage );
	int ReduceMaxMatches() const;
	int AdjustMaxMatches ( int iMaxMatches ) const;
	bool ConvertColumnarToDocstore();
	CSphString GetAliasedColumnarAttrName ( const CSphColumnInfo & tAttr ) const;
	bool SetupAggregateExpr ( CSphColumnInfo & tExprCol, const CSphString & sExpr, DWORD uQueryPackedFactorFlags );
	bool SetupColumnarAggregates ( CSphColumnInfo & tExprCol );
	bool IsJoinAttr ( const CSphString & sAttr ) const;
	void ReplaceJsonGroupbyWithStrings ( CSphString & sJsonGroupBy );
	void UpdateAggregateDependencies ( CSphColumnInfo & tExprCol );
	int GetGroupbyAttrIndex() const { return GetAliasedAttrIndex ( m_tQuery.m_sGroupBy, m_tQuery, *m_pSorterSchema ); }
	int GetGroupDistinctAttrIndex() const { return GetAliasedAttrIndex ( m_tQuery.m_sGroupDistinct, m_tQuery, *m_pSorterSchema ); }

	// fast-path shortcuts and final queue spawning
	bool CanCalcFastCountDistinct() const;
	bool CanCalcFastCountFilter() const;
	bool CanCalcFastCount() const;
	PrecalculatedSorterResults_t FetchPrecalculatedValues() const;
	ISphMatchSorter * SpawnQueue();
	std::unique_ptr<ISphFilter> CreateAggrFilter() const;
	void SetupCollation();
	bool Err ( const char * sFmt, ... ) const;
};
// captures references to external settings and seeds the sorter schema
// from the index schema; the schema is extended later during setup
QueueCreator_c::QueueCreator_c ( const SphQueueSettings_t & tSettings, const CSphQuery & tQuery, CSphString & sError, StrVec_t * pExtra, QueryProfile_c * pProfile )
	: m_tSettings ( tSettings )
	, m_tQuery ( tQuery )
	, m_sError ( sError )
	, m_pExtra ( pExtra )
	, m_pProfile ( pProfile )
	, m_pSorterSchema { std::make_unique<CSphRsetSchema>() }
{
	// short-cuts
	m_sError = "";
	*m_pSorterSchema = m_tSettings.m_tSchema;

	// preallocate one extra-JSON-expression slot per possible sort key
	m_dMatchJsonExprs.Resize ( CSphMatchComparatorState::MAX_ATTRS );
	m_dGroupJsonExprs.Resize ( CSphMatchComparatorState::MAX_ATTRS );
}
// for columnar expressions, returns the name of the underlying columnar
// attribute; for everything else, returns the attr's own name
CSphString QueueCreator_c::GetAliasedColumnarAttrName ( const CSphColumnInfo & tAttr ) const
{
	if ( tAttr.IsColumnarExpr() )
	{
		CSphString sColumnarCol;
		tAttr.m_pExpr->Command ( SPH_EXPR_GET_COLUMNAR_COL, &sColumnarCol );
		return sColumnarCol;
	}

	return tAttr.m_sName;
}
// spawns a grouper specialized for the groupby attribute's type;
// bGrouperUsesAttrs is cleared when the grouper fetches values itself
// (columnar/expression groupers) rather than reading attrs from the match
void QueueCreator_c::CreateGrouperByAttr ( ESphAttr eType, const CSphColumnInfo & tGroupByAttr, bool & bGrouperUsesAttrs )
{
	assert ( m_pSorterSchema );
	auto & tSchema = *m_pSorterSchema;
	const CSphAttrLocator & tLoc = tGroupByAttr.m_tLocator;

	switch ( eType )
	{
	case SPH_ATTR_JSON:
	case SPH_ATTR_JSON_FIELD:
		{
			// JSON grouping needs a parsed expression to extract the field value
			ExprParseArgs_t tExprArgs;
			tExprArgs.m_eCollation = m_tQuery.m_eCollation;

			ISphExprRefPtr_c pExpr { sphExprParse ( m_tQuery.m_sGroupBy.cstr(), tSchema, m_tSettings.m_pJoinArgs ? &(m_tSettings.m_pJoinArgs->m_sIndex2) : nullptr, m_sError, tExprArgs ) };
			m_tGroupSorterSettings.m_pGrouper = CreateGrouperJsonField ( tLoc, pExpr );
			m_tGroupSorterSettings.m_bJson = true;
		}
		break;

	case SPH_ATTR_STRING:
	case SPH_ATTR_STRINGPTR:
		// percolate select list push matches with string_ptr

		// check if it is a columnar attr or an expression spawned instead of a columnar attr
		// even if it is an expression, spawn a new one, because a specialized grouper works a lot faster because it doesn't allocate and store string in the match
		if ( tGroupByAttr.IsColumnar() || tGroupByAttr.IsColumnarExpr() )
		{
			m_tGroupSorterSettings.m_pGrouper = CreateGrouperColumnarString ( GetAliasedColumnarAttrName(tGroupByAttr), m_tQuery.m_eCollation );
			bGrouperUsesAttrs = false;
		}
		else if ( tGroupByAttr.m_pExpr && !tGroupByAttr.m_pExpr->IsDataPtrAttr() )
		{
			m_tGroupSorterSettings.m_pGrouper = CreateGrouperStringExpr ( tGroupByAttr.m_pExpr, m_tQuery.m_eCollation );
			bGrouperUsesAttrs = false;
		}
		else
			m_tGroupSorterSettings.m_pGrouper = CreateGrouperString ( tLoc, m_tQuery.m_eCollation );
		break;

	case SPH_ATTR_UINT32SET:
	case SPH_ATTR_INT64SET:
	case SPH_ATTR_UINT32SET_PTR:
	case SPH_ATTR_INT64SET_PTR:
		if ( tGroupByAttr.IsColumnar() || tGroupByAttr.IsColumnarExpr() )
		{
			m_tGroupSorterSettings.m_pGrouper = CreateGrouperColumnarMVA ( GetAliasedColumnarAttrName(tGroupByAttr), eType );
			bGrouperUsesAttrs = false;
		}
		else
		{
			// row-wise MVAs: pick a 32-bit or 64-bit value grouper
			if ( eType==SPH_ATTR_UINT32SET || eType==SPH_ATTR_UINT32SET_PTR )
				m_tGroupSorterSettings.m_pGrouper = CreateGrouperMVA32(tLoc);
			else
				m_tGroupSorterSettings.m_pGrouper = CreateGrouperMVA64(tLoc);
		}
		break;

	case SPH_ATTR_BOOL:
	case SPH_ATTR_INTEGER:
	case SPH_ATTR_BIGINT:
	case SPH_ATTR_FLOAT:
		// scalar types get a columnar grouper only if the value is not
		// already fetched into the match at an early eval stage
		if ( tGroupByAttr.IsColumnar() || ( tGroupByAttr.IsColumnarExpr() && tGroupByAttr.m_eStage>SPH_EVAL_PREFILTER ) )
		{
			m_tGroupSorterSettings.m_pGrouper = CreateGrouperColumnarInt ( GetAliasedColumnarAttrName(tGroupByAttr), eType );
			bGrouperUsesAttrs = false;
		}
		break;

	default:
		break;
	}

	// fall back to the generic attr-locator grouper
	if ( !m_tGroupSorterSettings.m_pGrouper )
		m_tGroupSorterSettings.m_pGrouper = CreateGrouperAttr(tLoc);
}
// resolves the COUNT(DISTINCT attr) attribute and creates its value fetcher;
// spawns a sorter-stage JSON field column when the attr is a JSON subfield.
// returns false (with m_sError set) when the attr can't be resolved
bool QueueCreator_c::SetupDistinctAttr()
{
	const CSphString & sDistinct = m_tQuery.m_sGroupDistinct;
	if ( sDistinct.IsEmpty() )
		return true;

	assert ( m_pSorterSchema );
	auto & tSchema = *m_pSorterSchema;

	int iDistinct = tSchema.GetAttrIndex ( sDistinct.cstr() );
	if ( iDistinct<0 )
	{
		// not a plain attr; it might be a JSON subfield (possibly from a joined index)
		// fixed: removed the unreachable 'return false;' that followed this return
		CSphString sJsonCol;
		if ( !sphJsonNameSplit ( sDistinct.cstr(), m_tQuery.m_sJoinIdx.cstr(), &sJsonCol ) )
			return Err ( "group-count-distinct attribute '%s' not found", sDistinct.cstr() );

		// spawn a sorter-stage expression column for the JSON field
		CSphColumnInfo tExprCol ( sDistinct.cstr(), SPH_ATTR_JSON_FIELD_PTR );
		tExprCol.m_eStage = SPH_EVAL_SORTER;
		tExprCol.m_uAttrFlags = CSphColumnInfo::ATTR_JOINED;
		m_pSorterSchema->AddAttr ( tExprCol, true );
		iDistinct = m_pSorterSchema->GetAttrIndex ( tExprCol.m_sName.cstr() );
	}

	const auto & tDistinctAttr = tSchema.GetAttr(iDistinct);
	if ( IsNotRealAttribute(tDistinctAttr) )
		return Err ( "group-count-distinct attribute '%s' not found", sDistinct.cstr() );

	// columnar attrs need a specialized fetcher; row-wise ones use the locator
	if ( tDistinctAttr.IsColumnar() )
		m_tGroupSorterSettings.m_pDistinctFetcher = CreateColumnarDistinctFetcher ( tDistinctAttr.m_sName, tDistinctAttr.m_eAttrType, m_tQuery.m_eCollation );
	else
		m_tGroupSorterSettings.m_pDistinctFetcher = CreateDistinctFetcher ( tDistinctAttr.m_sName, tDistinctAttr.m_tLocator, tDistinctAttr.m_eAttrType );

	m_bJoinedGroupSort |= IsJoinAttr ( tDistinctAttr.m_sName );
	return true;
}
// configures m_tGroupSorterSettings for the query's GROUP BY clause:
// handles multi-attribute grouping, JSON subfield grouping, implicit grouping,
// and the legacy day/week/month/year grouping modes.
// returns false (with m_sError set) on any unresolved/unsupported groupby
bool QueueCreator_c::SetupGroupbySettings ( bool bHasImplicitGrouping )
{
	// nothing to do without an explicit GROUP BY or implicit grouping
	if ( m_tQuery.m_sGroupBy.IsEmpty() && !bHasImplicitGrouping )
		return true;

	if ( m_tQuery.m_eGroupFunc==SPH_GROUPBY_ATTRPAIR )
		return Err ( "SPH_GROUPBY_ATTRPAIR is not supported any more (just group on 'bigint' attribute)" );

	assert ( m_pSorterSchema );
	auto & tSchema = *m_pSorterSchema;

	m_tGroupSorterSettings.m_iMaxMatches = m_tSettings.m_iMaxMatches;

	if ( !SetupDistinctAttr() )
		return false;

	CSphString sJsonColumn;
	if ( m_tQuery.m_eGroupFunc==SPH_GROUPBY_MULTIPLE )
	{
		// comma-separated multi-attribute grouping
		CSphVector<CSphColumnInfo> dAttrs;
		VecRefPtrs_t<ISphExpr *> dJsonKeys;

		StrVec_t dGroupBy;
		sph::Split ( m_tQuery.m_sGroupBy.cstr (), -1, ",", [&] ( const char * sToken, int iLen )
		{
			CSphString sGroupBy ( sToken, iLen );
			sGroupBy.Trim ();
			dGroupBy.Add ( std::move ( sGroupBy ));
		} );
		dGroupBy.Uniq();

		for ( auto & sGroupBy : dGroupBy )
		{
			int iAttr = tSchema.GetAttrIndex ( sGroupBy.cstr() );
			CSphString sJsonExpr;
			// "attr.field" means grouping by a JSON subfield of 'attr'
			if ( iAttr<0 && sphJsonNameSplit ( sGroupBy.cstr(), m_tQuery.m_sJoinIdx.cstr(), &sJsonColumn ) )
			{
				sJsonExpr = sGroupBy;
				sGroupBy = sJsonColumn;
			}

			iAttr = tSchema.GetAttrIndex ( sGroupBy.cstr() );
			if ( iAttr<0 )
				return Err( "group-by attribute '%s' not found", sGroupBy.cstr() );

			// NOTE(review): this copies the column info; the copy is also pushed into dAttrs below
			auto tAttr = tSchema.GetAttr ( iAttr );
			ESphAttr eType = tAttr.m_eAttrType;
			if ( eType==SPH_ATTR_UINT32SET || eType==SPH_ATTR_INT64SET )
				return Err ( "MVA values can't be used in multiple group-by" );

			if ( eType==SPH_ATTR_JSON && sJsonExpr.IsEmpty() )
				return Err ( "JSON blob can't be used in multiple group-by" );

			dAttrs.Add ( tAttr );
			m_dGroupColumns.Add ( { iAttr, true } );
			m_dGroupJsonAttrs.Add(sJsonExpr);

			// one key expression per attribute: parsed JSON subfield expr,
			// a clone of an existing JSON-field expr, or null for plain attrs
			if ( !sJsonExpr.IsEmpty() )
			{
				ExprParseArgs_t tExprArgs;
				dJsonKeys.Add ( sphExprParse ( sJsonExpr.cstr(), tSchema, m_tSettings.m_pJoinArgs ? &(m_tSettings.m_pJoinArgs->m_sIndex2) : nullptr, m_sError, tExprArgs ) );
			}
			else if ( tAttr.m_eAttrType==SPH_ATTR_JSON_FIELD )
			{
				assert ( tAttr.m_pExpr );
				dJsonKeys.Add ( tAttr.m_pExpr->Clone() );
			} else
			{
				dJsonKeys.Add ( nullptr );
			}

			m_bJoinedGroupSort |= IsJoinAttr(sGroupBy);
		}

		m_tGroupSorterSettings.m_pGrouper = CreateGrouperMulti ( dAttrs, std::move(dJsonKeys), m_tQuery.m_eCollation );
		return true;
	}

	int iGroupBy = GetGroupbyAttrIndex();
	bool bJoined = iGroupBy>=0 && m_pSorterSchema->GetAttr(iGroupBy).IsJoined();

	// GROUP BY on a JSON subfield ("attr.field") when no attr with that full
	// name exists, or when the resolved attr comes from a joined index
	if ( ( iGroupBy<0 || bJoined ) && sphJsonNameSplit ( m_tQuery.m_sGroupBy.cstr(), m_tQuery.m_sJoinIdx.cstr(), &sJsonColumn ) )
	{
		const int iAttr = tSchema.GetAttrIndex ( sJsonColumn.cstr() );
		if ( iAttr<0 )
			return Err ( "groupby: no such attribute '%s'", sJsonColumn.cstr ());

		if ( tSchema.GetAttr(iAttr).m_eAttrType!=SPH_ATTR_JSON
			&& tSchema.GetAttr(iAttr).m_eAttrType!=SPH_ATTR_JSON_PTR )
			return Err ( "groupby: attribute '%s' does not have subfields (must be sql_attr_json)", sJsonColumn.cstr() );

		if ( m_tQuery.m_eGroupFunc!=SPH_GROUPBY_ATTR )
			return Err ( "groupby: legacy groupby modes are not supported on JSON attributes" );

		m_dGroupColumns.Add ( { iAttr, true } );

		ExprParseArgs_t tExprArgs;
		tExprArgs.m_eCollation = m_tQuery.m_eCollation;

		ISphExprRefPtr_c pExpr { sphExprParse ( m_tQuery.m_sGroupBy.cstr(), tSchema, m_tSettings.m_pJoinArgs ? &(m_tSettings.m_pJoinArgs->m_sIndex2) : nullptr, m_sError, tExprArgs ) };
		m_tGroupSorterSettings.m_pGrouper = CreateGrouperJsonField ( tSchema.GetAttr(iAttr).m_tLocator, pExpr );
		m_tGroupSorterSettings.m_bJson = true;
		m_bJoinedGroupSort |= bJoined;
		return true;
	}

	// implicit grouping: all matches fall into a single group
	if ( bHasImplicitGrouping )
	{
		m_tGroupSorterSettings.m_bImplicit = true;
		return true;
	}

	// setup groupby attr
	if ( iGroupBy<0 )
		return Err ( "group-by attribute '%s' not found", m_tQuery.m_sGroupBy.cstr() );

	const CSphColumnInfo & tGroupByAttr = tSchema.GetAttr(iGroupBy);
	if ( m_tSettings.m_bComputeItems && tGroupByAttr.m_pExpr && tGroupByAttr.m_pExpr->UsesDocstore() )
		return Err ( "unable to group by stored field '%s'", m_tQuery.m_sGroupBy.cstr() );

	ESphAttr eType = tGroupByAttr.m_eAttrType;
	CSphAttrLocator tLoc = tGroupByAttr.m_tLocator;
	m_bJoinedGroupSort |= IsJoinAttr ( tGroupByAttr.m_sName );
	bool bGrouperUsesAttrs = true;
	switch (m_tQuery.m_eGroupFunc )
	{
		case SPH_GROUPBY_DAY:
			m_tGroupSorterSettings.m_pGrouper = CreateGrouperDay(tLoc); break;
		case SPH_GROUPBY_WEEK:
			m_tGroupSorterSettings.m_pGrouper = CreateGrouperWeek(tLoc); break;
		case SPH_GROUPBY_MONTH:
			m_tGroupSorterSettings.m_pGrouper = CreateGrouperMonth(tLoc); break;
		case SPH_GROUPBY_YEAR:
			m_tGroupSorterSettings.m_pGrouper = CreateGrouperYear(tLoc); break;
		case SPH_GROUPBY_ATTR:
			CreateGrouperByAttr ( eType, tGroupByAttr, bGrouperUsesAttrs );
			break;
		default:
			return Err ( "invalid group-by mode (mode=%d)", m_tQuery.m_eGroupFunc );
	}

	m_dGroupColumns.Add ( { iGroupBy, bGrouperUsesAttrs } );
	return true;
}
// move expressions used in ORDER BY or WITHIN GROUP ORDER BY to presort phase
void QueueCreator_c::AssignOrderByToPresortStage ( const int * pAttrs, int iAttrCount )
{
if ( !iAttrCount )
return;
assert ( pAttrs );
assert ( m_pSorterSchema );
StrVec_t dCur;
// add valid attributes to processing list
for ( int i=0; i<iAttrCount; ++i )
if ( pAttrs[i]>=0 )
dCur.Add ( m_pSorterSchema->GetAttr ( pAttrs[i] ).m_sName );
// collect columns which affect current expressions
ARRAY_FOREACH ( i, dCur )
{
const CSphColumnInfo * pCol = m_pSorterSchema->GetAttr ( dCur[i].cstr() );
assert(pCol);
if ( pCol->m_eStage>SPH_EVAL_PRESORT && pCol->m_pExpr )
pCol->m_pExpr->Command ( SPH_EXPR_GET_DEPENDENT_COLS, &dCur );
}
// get rid of dupes
dCur.Uniq();
// fix up of attributes stages
for ( const auto & sAttr : dCur )
{
auto pCol = const_cast<CSphColumnInfo *>( m_pSorterSchema->GetAttr ( sAttr.cstr() ) );
assert(pCol);
if ( pCol->m_eStage==SPH_EVAL_FINAL )
pCol->m_eStage = SPH_EVAL_PRESORT;
}
}
// registers all sort-key attrs in m_hExtra and marks whether any of them
// (or their dependencies) come from a joined index
void QueueCreator_c::ExtraAddSortkeys ( const int * dAttrs )
{
	// dAttrs holds one attr index per sort key slot; -1 means unused
	for ( int i=0; i<CSphMatchComparatorState::MAX_ATTRS; ++i )
		if ( dAttrs[i]>=0 )
		{
			const auto & tAttr = m_pSorterSchema->GetAttr ( dAttrs[i] );
			m_bJoinedGroupSort |= tAttr.IsJoined();
			m_hExtra.Add ( tAttr.m_sName );

			if ( m_tSettings.m_bComputeItems )
			{
				// check if dependent columns are joined
				StrVec_t dCols;
				dCols.Add ( tAttr.m_sName );
				FetchDependencyChains(dCols);

				for ( const auto & sAttr : dCols )
				{
					const CSphColumnInfo * pAttr = m_pSorterSchema->GetAttr ( sAttr.cstr() );
					assert(pAttr);
					m_bJoinedGroupSort |= pAttr->IsJoined();
				}
			}
		}
}
// printf-style error reporter; always returns false so callers can
// write 'return Err(...)' as a one-liner
bool QueueCreator_c::Err ( const char * sFmt, ... ) const
{
	va_list ap;
	va_start ( ap, sFmt );
	m_sError.SetSprintfVa ( sFmt, ap );
	va_end ( ap );
	return false;
}
// decides whether an expression should be evaluated at the prefilter stage;
// non-columnar expressions always are, columnar ones only when they are used
// by both the groupby clause and a filter
void QueueCreator_c::SelectStageForColumnarExpr ( CSphColumnInfo & tExprCol )
{
	if ( !tExprCol.IsColumnarExpr() )
	{
		tExprCol.m_eStage = SPH_EVAL_PREFILTER;
		return;
	}

	// columnar expressions are a special case
	// it is sometimes faster to evaluate them in the filter than to evaluate the expression, store it in the match and then use it in the filter
	// FIXME: add sorters?
	bool bUsedInGroupby = tExprCol.m_sName==m_tQuery.m_sGroupBy;
	bool bUsedInFilter = m_tQuery.m_dFilters.any_of ( [&tExprCol]( const CSphFilterSettings & tFilter ) { return tFilter.m_sAttrName==tExprCol.m_sName; } );

	if ( bUsedInGroupby && bUsedInFilter )
		tExprCol.m_eStage = SPH_EVAL_PREFILTER;
}
// expands dDependentCols in place with the full transitive set of columns the
// listed columns depend on; self-dependencies are filtered out to avoid cycles
void QueueCreator_c::FetchDependencyChains ( StrVec_t & dDependentCols )
{
	// NOTE: the vector grows while we iterate; newly appended names are
	// visited too, which is what walks the whole dependency chain
	ARRAY_FOREACH ( i, dDependentCols )
	{
		const CSphString & sAttr = dDependentCols[i];
		int iAttr = m_pSorterSchema->GetAttrIndex ( sAttr.cstr() );
		assert ( iAttr>=0 );

		// removed attrs have no live column info to query
		if ( m_pSorterSchema->IsRemovedAttr(iAttr) )
			continue;

		const CSphColumnInfo & tCol = m_pSorterSchema->GetAttr(iAttr);
		int iOldLen = dDependentCols.GetLength();

		// handle chains of dependencies (e.g. SELECT 1+attr f1, f1-1 f2 ... WHERE f2>5)
		if ( tCol.m_pExpr )
			tCol.m_pExpr->Command ( SPH_EXPR_GET_DEPENDENT_COLS, &dDependentCols );

		// some expressions depend on the column they are attached to (json fast key)
		// so filter out duplicates to avoid circular dependencies
		for ( int iNewAttr = iOldLen; iNewAttr < dDependentCols.GetLength(); iNewAttr++ )
			if ( dDependentCols[iNewAttr]==dDependentCols[i] )
				dDependentCols.Remove(iNewAttr);
	}

	dDependentCols.Uniq();
}
// propagates evaluation stages between an expression and its dependencies:
// if any dependency uses WEIGHT(), the expression is pushed to presort
// (weights don't exist before ranking); then every dependency is pulled
// forward to a stage no later than the expression's own
void QueueCreator_c::PropagateEvalStage ( CSphColumnInfo & tExprCol, StrVec_t & dDependentCols )
{
	bool bWeight = false;
	for ( const auto & sAttr : dDependentCols )
	{
		const CSphColumnInfo * pCol = m_pSorterSchema->GetAttr ( sAttr.cstr() );
		assert(pCol);
		bWeight |= pCol->m_bWeight;
	}

	if ( bWeight )
	{
		tExprCol.m_eStage = SPH_EVAL_PRESORT;
		tExprCol.m_bWeight = true;
	}

	for ( const auto & sAttr : dDependentCols )
	{
		auto pDep = const_cast<CSphColumnInfo *> ( m_pSorterSchema->GetAttr ( sAttr.cstr() ) );
		assert(pDep); // fixed: the first loop asserted, this one dereferenced unchecked
		if ( pDep->m_eStage > tExprCol.m_eStage )
			pDep->m_eStage = tExprCol.m_eStage;
	}
}
// applies result-type coercions required by aggregate functions:
// AVG computes in doubles, GROUP_CONCAT produces strings, and SUM widens
// bool->int and int->bigint to reduce overflow risk.
// returns false (with m_sError set) for untyped JSON aggregate arguments
bool QueueCreator_c::SetupAggregateExpr ( CSphColumnInfo & tExprCol, const CSphString & sExpr, DWORD uQueryPackedFactorFlags )
{
	switch ( tExprCol.m_eAggrFunc )
	{
	case SPH_AGGR_AVG:
		// force AVG() to be computed in doubles
		tExprCol.m_eAttrType = SPH_ATTR_DOUBLE;
		tExprCol.m_tLocator.m_iBitCount = 64;
		break;

	case SPH_AGGR_CAT:
		// force GROUP_CONCAT() to be computed as strings
		tExprCol.m_eAttrType = SPH_ATTR_STRINGPTR;
		tExprCol.m_tLocator.m_iBitCount = ROWITEMPTR_BITS;
		break;

	case SPH_AGGR_SUM:
		// widen SUM() accumulators: bool -> int, int -> bigint
		if ( tExprCol.m_eAttrType==SPH_ATTR_BOOL )
		{
			tExprCol.m_eAttrType = SPH_ATTR_INTEGER;
			tExprCol.m_tLocator.m_iBitCount = 32;
		} else if ( tExprCol.m_eAttrType==SPH_ATTR_INTEGER )
		{
			tExprCol.m_eAttrType = SPH_ATTR_BIGINT;
			tExprCol.m_tLocator.m_iBitCount = 64;
		}
		break;

	default:
		break;
	}

	// force explicit type conversion for JSON attributes
	if ( tExprCol.m_eAggrFunc!=SPH_AGGR_NONE && tExprCol.m_eAttrType==SPH_ATTR_JSON_FIELD )
		return Err ( "ambiguous attribute type '%s', use INTEGER(), BIGINT() or DOUBLE() conversion functions", sExpr.cstr() );

	if ( uQueryPackedFactorFlags & SPH_FACTOR_JSON_OUT )
		tExprCol.m_eAttrType = SPH_ATTR_FACTORS_JSON;

	return true;
}
// detects whether an aggregate expression works over a single columnar
// attribute; if so, binds the aggregate to that columnar column directly.
// returns true when the aggregate can run against columnar storage
bool QueueCreator_c::SetupColumnarAggregates ( CSphColumnInfo & tExprCol )
{
	StrVec_t dDependentCols;
	tExprCol.m_pExpr->Command ( SPH_EXPR_GET_DEPENDENT_COLS, &dDependentCols );
	FetchDependencyChains(dDependentCols);

	// no dependencies: columnar only if the aggregate itself is a columnar expr
	if ( !dDependentCols.GetLength() )
		return tExprCol.IsColumnarExpr();

	// a single dependency that is itself a columnar expression can be
	// short-circuited: point the aggregate straight at the columnar column
	if ( dDependentCols.GetLength()==1 )
	{
		int iAttr = m_pSorterSchema->GetAttrIndex ( dDependentCols[0].cstr() );
		assert ( iAttr>=0 );

		if ( m_pSorterSchema->IsRemovedAttr(iAttr) )
			return false;

		const CSphColumnInfo & tColumnarAttr = m_pSorterSchema->GetAttr(iAttr);
		if ( tColumnarAttr.IsColumnarExpr() )
		{
			CSphString sColumnarCol;
			tColumnarAttr.m_pExpr->Command ( SPH_EXPR_GET_COLUMNAR_COL, &sColumnarCol );

			// let aggregate expression know that it is working with that columnar attribute
			tExprCol.m_pExpr->Command ( SPH_EXPR_SET_COLUMNAR_COL, &sColumnarCol );
			return true;
		}
	}

	return false;
}
/// update aggregate dependencies (e.g. SELECT 1+attr f1, min(f1), ...)
/// pulls each dependency's eval stage forward to the aggregate's stage,
/// except for plain joined attrs which must stay at the sorter stage
void QueueCreator_c::UpdateAggregateDependencies ( CSphColumnInfo & tExprCol )
{
	StrVec_t dDependentCols;
	tExprCol.m_pExpr->Command ( SPH_EXPR_GET_DEPENDENT_COLS, &dDependentCols );
	FetchDependencyChains ( dDependentCols );
	for ( const auto & sAttr : dDependentCols )
	{
		auto pDep = const_cast<CSphColumnInfo *>( m_pSorterSchema->GetAttr ( sAttr.cstr() ) );
		assert(pDep);

		// a joined attr (dynamic, no expr, sorter stage) is filled by the join
		// itself and must not be promoted to an earlier stage
		bool bJoinedAttr = ExprHasJoinPrefix ( pDep->m_sName, m_tSettings.m_pJoinArgs.get() ) && pDep->m_eStage==SPH_EVAL_SORTER && pDep->m_tLocator.m_bDynamic && !pDep->m_pExpr;
		if ( pDep->m_eStage>tExprCol.m_eStage && !bJoinedAttr )
			pDep->m_eStage = tExprCol.m_eStage;
	}
}
void QueueCreator_c::AddAttrsFromSchema ( const ISphSchema & tSchema, const CSphString & sPrefix )
{
for ( int i=0; i<tSchema.GetAttrsCount(); i++ )
{
CSphString sAttrName = tSchema.GetAttr(i).m_sName;
sAttrName.SetSprintf ( "%s%s", sPrefix.scstr(), sAttrName.cstr() );
m_hQueryDups.Add ( sAttrName );
m_hQueryColumns.Add ( sAttrName );
}
}
// rewrites a select-list expression that references the joined index:
// drops the expression (the join delivers the value precalculated) and marks
// the column as a sorter-stage joined ptr attr. aggregates are left alone
void QueueCreator_c::ModifyExprForJoin ( CSphColumnInfo & tExprCol, const CSphString & sExpr )
{
	// even if it's over a join expr, it references another attr, so don't remove the expression
	if ( tExprCol.m_eAggrFunc!=SPH_AGGR_NONE )
		return;

	// check expr and its alias
	if ( !ExprHasJoinPrefix ( tExprCol.m_sName, m_tSettings.m_pJoinArgs.get() ) && !ExprHasJoinPrefix ( sExpr, m_tSettings.m_pJoinArgs.get() ) )
		return;

	// we receive already precalculated JSON field expressions from JOIN
	// we don't need to evaluate them once again
	tExprCol.m_eAttrType = sphPlainAttrToPtrAttr ( tExprCol.m_eAttrType );
	tExprCol.m_pExpr = nullptr;
	tExprCol.m_eStage = SPH_EVAL_SORTER;
	tExprCol.m_uAttrFlags |= CSphColumnInfo::ATTR_JOINED;
}
// promotes an expression (and its dependencies) to an earlier eval stage
// when the expression is referenced by a filter; JSON fields and joined
// attrs keep their stage
void QueueCreator_c::SelectExprEvalStage ( CSphColumnInfo & tExprCol )
{
	// is this expression used in filter?
	// OPTIMIZE? hash filters and do hash lookups?
	if ( tExprCol.m_eAttrType==SPH_ATTR_JSON_FIELD )
		return;

	if ( tExprCol.IsJoined() )
		return;

	ARRAY_FOREACH ( i, m_tQuery.m_dFilters )
		if ( m_tQuery.m_dFilters[i].m_sAttrName==tExprCol.m_sName )
		{
			// is this a hack?
			// m_bWeight is computed after EarlyReject() get called
			// that means we can't evaluate expressions with WEIGHT() in prefilter phase
			if ( tExprCol.m_bWeight )
			{
				tExprCol.m_eStage = SPH_EVAL_PRESORT; // special, weight filter ( short cut )
				break;
			}

			// so we are about to add a filter condition,
			// but it might depend on some preceding columns (e.g. SELECT 1+attr f1 ... WHERE f1>5)
			// lets detect those and move them to prefilter \ presort phase too
			StrVec_t dDependentCols;
			tExprCol.m_pExpr->Command ( SPH_EXPR_GET_DEPENDENT_COLS, &dDependentCols );
			SelectStageForColumnarExpr(tExprCol);
			FetchDependencyChains ( dDependentCols );
			PropagateEvalStage ( tExprCol, dDependentCols );
			break;
		}
}
// Process a single SELECT-list item: plain attrs pass through, "*" expands
// the index schema(s), everything else is parsed as an expression and added
// to the sorter schema (aggregates get special eval-stage handling).
// Returns false (with m_sError set) on parse/validation failure.
bool QueueCreator_c::ParseQueryItem ( const CSphQueryItem & tItem )
{
	assert ( m_pSorterSchema );
	const CSphString & sExpr = tItem.m_sExpr;
	bool bIsCount = IsCount(sExpr);
	m_bHasCount |= bIsCount;

	if ( sExpr=="*" )
	{
		m_bHaveStar = true;

		// with a JOIN, attrs from both schemas are added, each prefixed by its table name
		CSphString sPrefix;
		if ( m_tSettings.m_pJoinArgs )
			sPrefix.SetSprintf ( "%s.", m_tSettings.m_pJoinArgs->m_sIndex1.cstr() );

		AddAttrsFromSchema ( m_tSettings.m_tSchema, sPrefix );

		if ( m_tSettings.m_pJoinArgs )
		{
			sPrefix.SetSprintf ( "%s.", m_tSettings.m_pJoinArgs->m_sIndex2.cstr() );
			AddAttrsFromSchema ( m_tSettings.m_pJoinArgs->m_tJoinedSchema, sPrefix );
		}
	}

	// for now, just always pass "plain" attrs from index to sorter; they will be filtered on searchd level
	int iAttrIdx = m_tSettings.m_tSchema.GetAttrIndex ( sExpr.cstr() );
	bool bColumnar = iAttrIdx>=0 && m_tSettings.m_tSchema.GetAttr(iAttrIdx).IsColumnar();

	// "plain" means: an existing non-columnar attr (or "*"), without an aggregate
	// function and without a distinct alias
	bool bPlainAttr = ( ( sExpr=="*" || ( iAttrIdx>=0 && tItem.m_eAggrFunc==SPH_AGGR_NONE && !bColumnar ) ) &&
		( tItem.m_sAlias.IsEmpty() || tItem.m_sAlias==tItem.m_sExpr ) );

	if ( iAttrIdx>=0 )
	{
		ESphAttr eAttr = m_tSettings.m_tSchema.GetAttr ( iAttrIdx ).m_eAttrType;
		if ( eAttr==SPH_ATTR_STRING || eAttr==SPH_ATTR_STRINGPTR
			|| eAttr==SPH_ATTR_UINT32SET || eAttr==SPH_ATTR_INT64SET )
		{
			// strings and MVAs cannot be aggregated with scalar functions
			if ( tItem.m_eAggrFunc!=SPH_AGGR_NONE )
				return Err ( "can not aggregate non-scalar attribute '%s'", tItem.m_sExpr.cstr() );

			// an aliased string attr is still "plain" unless another item aliases over it
			if ( !bPlainAttr && !bColumnar && ( eAttr==SPH_ATTR_STRING || eAttr==SPH_ATTR_STRINGPTR ) )
			{
				bPlainAttr = true;
				for ( const auto & i : m_tQuery.m_dItems )
					if ( sExpr==i.m_sAlias )
						bPlainAttr = false;
			}
		}
	}

	// plain attrs, groupby magic and COUNT(*) need no expression; just record the names
	if ( bPlainAttr || IsGroupby ( sExpr ) || bIsCount )
	{
		if ( sExpr!="*" && !tItem.m_sAlias.IsEmpty() )
		{
			m_hQueryDups.Add ( tItem.m_sAlias );
			if ( bPlainAttr )
				m_hQueryColumns.Add ( tItem.m_sExpr );
		}

		m_bHasGroupByExpr = IsGroupby ( sExpr );
		return true;
	}

	// KNN_DIST() is only valid when the KNN dist attr was added by AddKNNDistColumn()
	if ( IsKnnDist(sExpr) && m_pSorterSchema->GetAttrIndex ( GetKnnDistAttrName() )<0 )
		return Err ( "KNN_DIST() is only allowed for KNN() queries" );

	// not an attribute? must be an expression, and must be aliased by query parser
	assert ( !tItem.m_sAlias.IsEmpty() );

	// tricky part
	// we might be fed with precomputed matches, but it's all or nothing
	// the incoming match either does not have anything computed, or it has everything
	// unless it is a JOIN - then we have partially precomputed matches
	int iSorterAttr = m_pSorterSchema->GetAttrIndex ( tItem.m_sAlias.cstr() );
	if ( iSorterAttr>=0 )
	{
		if ( m_hQueryDups[tItem.m_sAlias] )
		{
			bool bJoined = !!(m_pSorterSchema->GetAttr(iSorterAttr).m_uAttrFlags & CSphColumnInfo::ATTR_JOINED);
			if ( bColumnar || bJoined )	// we might have several similar aliases for columnar attributes (and they are not plain attrs but expressions)
				return true;
			else
				return Err ( "alias '%s' must be unique (conflicts with another alias)", tItem.m_sAlias.cstr() );
		}
	}

	// a new and shiny expression, lets parse
	CSphColumnInfo tExprCol ( tItem.m_sAlias.cstr(), SPH_ATTR_NONE );
	DWORD uQueryPackedFactorFlags = SPH_FACTOR_DISABLE;
	bool bHasZonespanlist = false;
	bool bExprsNeedDocids = false;

	ExprParseArgs_t tExprParseArgs;
	tExprParseArgs.m_pAttrType = &tExprCol.m_eAttrType;
	tExprParseArgs.m_pUsesWeight = &tExprCol.m_bWeight;
	tExprParseArgs.m_pProfiler = m_tSettings.m_pProfiler;
	tExprParseArgs.m_eCollation = m_tQuery.m_eCollation;
	tExprParseArgs.m_pHook = m_tSettings.m_pHook;
	tExprParseArgs.m_pZonespanlist = &bHasZonespanlist;
	tExprParseArgs.m_pPackedFactorsFlags = &uQueryPackedFactorFlags;
	tExprParseArgs.m_pEvalStage = &tExprCol.m_eStage;
	tExprParseArgs.m_pStoredField = &tExprCol.m_uFieldFlags;
	tExprParseArgs.m_pNeedDocIds = &bExprsNeedDocids;

	// tricky bit
	// GROUP_CONCAT() adds an implicit TO_STRING() conversion on top of its argument
	// and then the aggregate operation simply concatenates strings as matches arrive
	// ideally, we would instead pass ownership of the expression to G_C() implementation
	// and also the original expression type, and let the string conversion happen in G_C() itself
	// but that ideal route seems somewhat more complicated in the current architecture
	if ( tItem.m_eAggrFunc==SPH_AGGR_CAT )
	{
		CSphString sExpr2;
		sExpr2.SetSprintf ( "TO_STRING(%s)", sExpr.cstr() );
		tExprCol.m_pExpr = sphExprParse ( sExpr2.cstr(), *m_pSorterSchema, m_tSettings.m_pJoinArgs ? &(m_tSettings.m_pJoinArgs->m_sIndex2) : nullptr, m_sError, tExprParseArgs );
	}
	else
		tExprCol.m_pExpr = sphExprParse ( sExpr.cstr(), *m_pSorterSchema, m_tSettings.m_pJoinArgs ? &(m_tSettings.m_pJoinArgs->m_sIndex2) : nullptr, m_sError, tExprParseArgs );

	// accumulate per-query flags discovered while parsing this item
	m_uPackedFactorFlags |= uQueryPackedFactorFlags;
	m_bZonespanlist |= bHasZonespanlist;
	m_bExprsNeedDocids |= bExprsNeedDocids;
	tExprCol.m_eAggrFunc = tItem.m_eAggrFunc;
	tExprCol.m_iIndex = iSorterAttr>= 0 ? m_pSorterSchema->GetAttrIndexOriginal ( tItem.m_sAlias.cstr() ) : -1;

	if ( !tExprCol.m_pExpr )
		return Err ( "parse error: %s", m_sError.cstr() );

	if ( !SetupAggregateExpr ( tExprCol, tItem.m_sExpr, uQueryPackedFactorFlags ) )
		return false;

	ModifyExprForJoin ( tExprCol, tItem.m_sExpr );

	// postpone aggregates, add non-aggregates
	if ( tExprCol.m_eAggrFunc==SPH_AGGR_NONE )
	{
		SelectExprEvalStage(tExprCol);

		// add it!
		// NOTE, "final" stage might need to be fixed up later
		// we'll do that when parsing sorting clause
		m_pSorterSchema->AddAttr ( tExprCol, true );

		// remove original column after new attribute added or shadows this one
		if ( iSorterAttr>=0 )
			m_pSorterSchema->RemoveStaticAttr ( iSorterAttr );
	}
	else // some aggregate
	{
		bool bColumnarAggregate = SetupColumnarAggregates(tExprCol);
		bool bJoinAggregate = ExprHasJoinPrefix ( tExprCol.m_sName, m_tSettings.m_pJoinArgs.get() );

		// columnar aggregates have their own code path; no need to calculate them in presort
		// and aggregates over joined attrs are calculated in the join sorter
		tExprCol.m_eStage = ( bColumnarAggregate || bJoinAggregate ) ? SPH_EVAL_SORTER : SPH_EVAL_PRESORT;
		m_pSorterSchema->AddAttr ( tExprCol, true );
		m_hExtra.Add ( tExprCol.m_sName );

		if ( !bColumnarAggregate )
			UpdateAggregateDependencies ( tExprCol );

		// remove original column after new attribute added or shadows this one
		if ( iSorterAttr>=0 )
			m_pSorterSchema->RemoveStaticAttr ( iSorterAttr );
	}

	m_hQueryDups.Add ( tExprCol.m_sName );
	m_hQueryColumns.Add ( tExprCol.m_sName );

	// need to add all dependent columns for post limit expressions
	if ( tExprCol.m_eStage==SPH_EVAL_POSTLIMIT && tExprCol.m_pExpr )
	{
		// transitively collect dependencies (dCur grows while we iterate it by index)
		StrVec_t dCur;
		tExprCol.m_pExpr->Command ( SPH_EXPR_GET_DEPENDENT_COLS, &dCur );

		ARRAY_FOREACH ( j, dCur )
		{
			const CSphColumnInfo * pCol = m_pSorterSchema->GetAttr ( dCur[j].cstr() );
			if ( pCol && pCol->m_pExpr )
				pCol->m_pExpr->Command ( SPH_EXPR_GET_DEPENDENT_COLS, &dCur );
		}

		dCur.Uniq();
		for ( const auto & sAttr : dCur )
		{
			const CSphColumnInfo * pDep = m_pSorterSchema->GetAttr ( sAttr.cstr() );
			assert(pDep);
			m_hQueryColumns.Add ( pDep->m_sName );
		}
	}

	return true;
}
// If sAttr is a columnar attribute in the sorter schema, replace it with an
// expression-backed query item and clamp its eval stage to at most eStage.
// Non-columnar (or missing) attributes are left untouched.
// Returns false only if re-adding the item as an expression fails.
bool QueueCreator_c::ReplaceWithColumnarItem ( const CSphString & sAttr, ESphEvalStage eStage )
{
	const CSphColumnInfo * pAttr = m_pSorterSchema->GetAttr ( sAttr.cstr() );

	// fix: sAttr may come from user input (e.g. geo anchor attr names) and may
	// not exist in the schema; bail out instead of dereferencing a nullptr and
	// let downstream validation report the missing attribute
	if ( !pAttr || !pAttr->IsColumnar() )
		return true;

	m_hQueryDups.Delete(sAttr);

	CSphQueryItem tItem;
	tItem.m_sExpr = tItem.m_sAlias = sAttr;
	if ( !ParseQueryItem ( tItem ) )
		return false;

	// force stage
	const CSphColumnInfo * pNewAttr = m_pSorterSchema->GetAttr ( sAttr.cstr() );
	const_cast<CSphColumnInfo *>(pNewAttr)->m_eStage = Min ( pNewAttr->m_eStage, eStage );
	return true;
}
// Test for @geodist and setup, if any.
// Adds a "@geodist" float attribute (computed at prefilter stage) to the
// sorter schema when the query has a geo anchor and the attr is not there yet.
bool QueueCreator_c::MaybeAddGeodistColumn ()
{
	// nothing to do without a geo anchor, or if @geodist already exists
	if ( !m_tQuery.m_bGeoAnchor || m_pSorterSchema->GetAttrIndex ( "@geodist" )>=0 )
		return true;

	// replace columnar lat/lon with expressions before adding geodist
	if ( !ReplaceWithColumnarItem ( m_tQuery.m_sGeoLatAttr, SPH_EVAL_PREFILTER ) ) return false;
	if ( !ReplaceWithColumnarItem ( m_tQuery.m_sGeoLongAttr, SPH_EVAL_PREFILTER ) ) return false;

	auto pExpr = CreateExprGeodist ( m_tQuery, *m_pSorterSchema, m_sError );
	if ( !pExpr )
		return false;

	CSphColumnInfo tCol ( "@geodist", SPH_ATTR_FLOAT );
	tCol.m_pExpr = pExpr; // takes ownership, no need to for explicit pExpr release
	tCol.m_eStage = SPH_EVAL_PREFILTER; // OPTIMIZE? actual stage depends on usage
	m_pSorterSchema->AddAttr ( tCol, true );
	m_hExtra.Add ( tCol.m_sName );
	m_hQueryAttrs.Add ( tCol.m_sName );
	return true;
}
// Test for @expr and setup, if any.
// For SPH_SORT_EXPR queries, parses the sort-by expression and adds it to the
// sorter schema as a "@expr" float attribute evaluated at presort stage.
bool QueueCreator_c::MaybeAddExprColumn ()
{
	if ( m_tQuery.m_eSort!=SPH_SORT_EXPR || m_pSorterSchema->GetAttrIndex ( "@expr" )>=0 )
		return true;

	CSphColumnInfo tCol ( "@expr", SPH_ATTR_FLOAT ); // enforce float type for backwards compatibility
	// (i.e. too lazy to fix those tests right now)

	// fix: initialize the flag, matching ParseQueryItem(); the parser may leave
	// it untouched, and it is OR-ed into m_bZonespanlist below
	bool bHasZonespanlist = false;
	ExprParseArgs_t tExprArgs;
	tExprArgs.m_pProfiler = m_tSettings.m_pProfiler;
	tExprArgs.m_eCollation = m_tQuery.m_eCollation;
	tExprArgs.m_pZonespanlist = &bHasZonespanlist;

	tCol.m_pExpr = sphExprParse ( m_tQuery.m_sSortBy.cstr (), *m_pSorterSchema, m_tSettings.m_pJoinArgs ? &(m_tSettings.m_pJoinArgs->m_sIndex2) : nullptr, m_sError, tExprArgs );
	if ( !tCol.m_pExpr )
		return false;

	m_bZonespanlist |= bHasZonespanlist;
	tCol.m_eStage = SPH_EVAL_PRESORT;
	m_pSorterSchema->AddAttr ( tCol, true );
	m_hQueryAttrs.Add ( tCol.m_sName );
	return true;
}
bool QueueCreator_c::AddStoredFieldExpressions()
{
for ( int i = 0; i<m_tSettings.m_tSchema.GetFieldsCount(); i++ )
{
const CSphColumnInfo & tField = m_tSettings.m_tSchema.GetField(i);
if ( !(tField.m_uFieldFlags & CSphColumnInfo::FIELD_STORED) )
continue;
CSphQueryItem tItem;
tItem.m_sExpr = tField.m_sName;
tItem.m_sAlias = tField.m_sName;
if ( !ParseQueryItem ( tItem ) )
return false;
}
return true;
}
bool QueueCreator_c::AddColumnarAttributeExpressions()
{
for ( int i = 0; i<m_tSettings.m_tSchema.GetAttrsCount(); i++ )
{
const CSphColumnInfo & tAttr = m_tSettings.m_tSchema.GetAttr(i);
const CSphColumnInfo * pSorterAttr = m_pSorterSchema->GetAttr ( tAttr.m_sName.cstr() );
if ( !tAttr.IsColumnar() || ( pSorterAttr && !pSorterAttr->IsColumnar() ) )
continue;
m_hQueryDups.Delete ( tAttr.m_sName );
CSphQueryItem tItem;
tItem.m_sExpr = tItem.m_sAlias = tAttr.m_sName;
if ( !ParseQueryItem ( tItem ) )
return false;
}
return true;
}
// Add computed items
bool QueueCreator_c::MaybeAddExpressionsFromSelectList ()
{
// expressions from select items
if ( !m_tSettings.m_bComputeItems )
return true;
if ( !m_tQuery.m_dItems.all_of ( [&] ( const CSphQueryItem & v ) { return ParseQueryItem ( v ); } ))
return false;
if ( m_bHaveStar )
{
if ( !AddColumnarAttributeExpressions() )
return false;
if ( !AddStoredFieldExpressions() )
return false;
}
return true;
}
// For update/delete queues (m_pCollection set): make sure document ids are
// fetchable by presort stage, adding an expression for columnar docids.
bool QueueCreator_c::AddExpressionsForUpdates()
{
	if ( !m_tSettings.m_pCollection )
		return true;

	const CSphColumnInfo * pOldDocId = m_pSorterSchema->GetAttr ( sphGetDocidName() );

	// fix: guard the lookup result before dereferencing (the code below already
	// asserts the attr exists after re-adding it; keep the precondition explicit here too)
	assert(pOldDocId);
	if ( !pOldDocId || ( !pOldDocId->IsColumnar() && !pOldDocId->IsColumnarExpr() ) )
		return true;

	if ( pOldDocId->IsColumnar() )
	{
		// add columnar id expressions to update queue. otherwise we won't be able to fetch docids which are needed to run updates/deletes
		CSphQueryItem tItem;
		tItem.m_sExpr = tItem.m_sAlias = sphGetDocidName();
		if ( !ParseQueryItem ( tItem ) )
			return false;
	}

	auto * pDocId = const_cast<CSphColumnInfo *> ( m_pSorterSchema->GetAttr ( sphGetDocidName() ) );
	assert(pDocId);
	pDocId->m_eStage = SPH_EVAL_PRESORT; // update/delete queues don't have real Finalize(), so just evaluate it at presort stage
	return true;
}
bool QueueCreator_c::IsJoinAttr ( const CSphString & sAttr ) const
{
if ( !m_tSettings.m_pJoinArgs )
return false;
CSphString sPrefix;
sPrefix.SetSprintf ( "%s.", m_tSettings.m_pJoinArgs->m_sIndex2.cstr() );
return sAttr.Begins ( sPrefix.cstr() );
}
// When grouping by JSON fields, add internal "@groupbystr_*" attributes to the
// sorter schema and, for joined JSON fields, switch the grouper to a string
// grouper (joined JSON cannot be grouped directly).
// On output, sJsonGroupBy holds the internal attr name (if one was created).
void QueueCreator_c::ReplaceJsonGroupbyWithStrings ( CSphString & sJsonGroupBy )
{
	auto AddColumn = [this] ( const CSphColumnInfo & tCol )
	{
		m_pSorterSchema->AddAttr ( tCol, true );
		m_hQueryColumns.Add ( tCol.m_sName );
	};

	if ( m_tGroupSorterSettings.m_bJson )
	{
		// single JSON group-by key
		bool bJoinAttr = IsJoinAttr ( m_tQuery.m_sGroupBy );
		sJsonGroupBy = SortJsonInternalSet ( m_tQuery.m_sGroupBy );
		if ( !m_pSorterSchema->GetAttr ( sJsonGroupBy.cstr() ) )
		{
			CSphColumnInfo tGroupbyStr ( sJsonGroupBy.cstr() );
			// joined JSON is materialized as a string; local JSON stays a JSON field
			if ( bJoinAttr )
				tGroupbyStr.m_eAttrType = SPH_ATTR_STRINGPTR;
			else
				tGroupbyStr.m_eAttrType = SPH_ATTR_JSON_FIELD;

			tGroupbyStr.m_eStage = SPH_EVAL_SORTER;
			AddColumn ( tGroupbyStr );
		}

		if ( bJoinAttr )
		{
			// we can't do grouping directly on joined JSON fields
			// so we need to change the grouper
			// fixme! this will not work on stuff that generates multiple groupby keys (like JSON arrays)
			const CSphColumnInfo * pRemapped = m_pSorterSchema->GetAttr ( sJsonGroupBy.cstr() );
			assert(pRemapped);
			m_tGroupSorterSettings.m_pGrouper = CreateGrouperString ( pRemapped->m_tLocator, m_tQuery.m_eCollation );
			m_tGroupSorterSettings.m_bJson = false;
		}
	}
	else if ( m_tQuery.m_eGroupFunc==SPH_GROUPBY_MULTIPLE && m_bJoinedGroupSort )
	{
		// multi-key group-by with joined sorting: remap joined JSON keys to string attrs
		bool bGrouperChanged = false;
		ARRAY_FOREACH ( i, m_dGroupColumns )
		{
			const CSphColumnInfo & tAttr = m_pSorterSchema->GetAttr ( m_dGroupColumns[i].first );
			bool bJoinAttr = IsJoinAttr ( tAttr.m_sName );
			bool bJson = tAttr.m_eAttrType==SPH_ATTR_JSON_PTR || tAttr.m_eAttrType==SPH_ATTR_JSON_FIELD_PTR;
			if ( bJoinAttr && bJson )
			{
				sJsonGroupBy = SortJsonInternalSet ( m_dGroupJsonAttrs[i] );
				if ( !m_pSorterSchema->GetAttr ( sJsonGroupBy.cstr() ) )
				{
					CSphColumnInfo tGroupbyStr ( sJsonGroupBy.cstr() );
					tGroupbyStr.m_eAttrType = SPH_ATTR_STRINGPTR;
					tGroupbyStr.m_eStage = SPH_EVAL_SORTER;
					AddColumn ( tGroupbyStr );
				}

				// point the group column at the remapped attr and drop the JSON expr
				m_dGroupColumns[i].first = m_pSorterSchema->GetAttrIndex ( sJsonGroupBy.cstr() );
				m_dGroupJsonAttrs[i] = "";
				bGrouperChanged = true;
			}
		}

		if ( bGrouperChanged )
		{
			// rebuild the multi-grouper over the (possibly remapped) columns
			CSphVector<CSphColumnInfo> dAttrs;
			VecRefPtrs_t<ISphExpr *> dJsonKeys;
			ARRAY_FOREACH ( i, m_dGroupColumns )
			{
				dAttrs.Add ( m_pSorterSchema->GetAttr ( m_dGroupColumns[i].first ) );

				const CSphString & sJsonExpr = m_dGroupJsonAttrs[i];
				if ( !sJsonExpr.IsEmpty() )
				{
					ExprParseArgs_t tExprArgs;
					dJsonKeys.Add ( sphExprParse ( sJsonExpr.cstr(), *m_pSorterSchema, m_tSettings.m_pJoinArgs ? &(m_tSettings.m_pJoinArgs->m_sIndex2) : nullptr, m_sError, tExprArgs ) );
				}
				else
					dJsonKeys.Add(nullptr);
			}

			m_tGroupSorterSettings.m_pGrouper = CreateGrouperMulti ( dAttrs, std::move(dJsonKeys), m_tQuery.m_eCollation );
		}
	}
}
// Add the magic grouping attributes (@groupby, @count, @distinct, and the
// JSON @groupbystr remap) to the sorter schema, then validate them and fill
// the group-sorter locators. Returns false with m_sError set on schema errors.
bool QueueCreator_c::MaybeAddGroupbyMagic ( bool bGotDistinct )
{
	CSphString sJsonGroupBy;
	// now let's add @groupby etc. if needed
	if ( m_bGotGroupby && m_pSorterSchema->GetAttrIndex ( "@groupby" )<0 )
	{
		ESphAttr eGroupByResult = ( !m_tGroupSorterSettings.m_bImplicit )
			? m_tGroupSorterSettings.m_pGrouper->GetResultType ()
			: SPH_ATTR_INTEGER; // implicit do not have grouper

		// all FACET group by should be the widest possible type
		if ( m_tQuery.m_bFacet || m_tQuery.m_bFacetHead || m_bMulti )
			eGroupByResult = SPH_ATTR_BIGINT;

		CSphColumnInfo tGroupby ( "@groupby", eGroupByResult );
		CSphColumnInfo tCount ( "@count", SPH_ATTR_BIGINT );
		tGroupby.m_eStage = SPH_EVAL_SORTER;
		tCount.m_eStage = SPH_EVAL_SORTER;

		auto AddColumn = [this] ( const CSphColumnInfo & tCol )
		{
			m_pSorterSchema->AddAttr ( tCol, true );
			m_hQueryColumns.Add ( tCol.m_sName );
		};

		AddColumn ( tGroupby );
		AddColumn ( tCount );

		if ( bGotDistinct )
		{
			CSphColumnInfo tDistinct ( "@distinct", SPH_ATTR_INTEGER );
			tDistinct.m_eStage = SPH_EVAL_SORTER;
			AddColumn ( tDistinct );
		}

		// add @groupbystr last in case we need to skip it on sending (like @int_attr_*)
		ReplaceJsonGroupbyWithStrings ( sJsonGroupBy );
	}

#define LOC_CHECK( _cond, _msg ) if (!(_cond)) { m_sError = "invalid schema: " _msg; return false; }

	// sanity-check the magic attrs and capture their locators
	int iGroupby = m_pSorterSchema->GetAttrIndex ( "@groupby" );
	if ( iGroupby>=0 )
	{
		m_tGroupSorterSettings.m_bDistinct = bGotDistinct;
		m_tGroupSorterSettings.m_tLocGroupby = m_pSorterSchema->GetAttr ( iGroupby ).m_tLocator;
		LOC_CHECK ( m_tGroupSorterSettings.m_tLocGroupby.m_bDynamic, "@groupby must be dynamic" );

		int iCount = m_pSorterSchema->GetAttrIndex ( "@count" );
		LOC_CHECK ( iCount>=0, "missing @count" );

		m_tGroupSorterSettings.m_tLocCount = m_pSorterSchema->GetAttr ( iCount ).m_tLocator;
		LOC_CHECK ( m_tGroupSorterSettings.m_tLocCount.m_bDynamic, "@count must be dynamic" );

		int iDistinct = m_pSorterSchema->GetAttrIndex ( "@distinct" );
		if ( bGotDistinct )
		{
			LOC_CHECK ( iDistinct>=0, "missing @distinct" );
			m_tGroupSorterSettings.m_tLocDistinct = m_pSorterSchema->GetAttr ( iDistinct ).m_tLocator;
			LOC_CHECK ( m_tGroupSorterSettings.m_tLocDistinct.m_bDynamic, "@distinct must be dynamic" );
		}
		else
			// NOTE(review): checks `iDistinct<=0` rather than `<0`; index 0 would pass
			// the check here — looks unreachable for @distinct in practice, but confirm
			LOC_CHECK ( iDistinct<=0, "unexpected @distinct" );

		int iGroupbyStr = m_pSorterSchema->GetAttrIndex ( sJsonGroupBy.cstr() );
		if ( iGroupbyStr>=0 )
			m_tGroupSorterSettings.m_tLocGroupbyStr = m_pSorterSchema->GetAttr ( iGroupbyStr ).m_tLocator;
	}

	if ( m_bHasCount )
		LOC_CHECK ( m_pSorterSchema->GetAttrIndex ( "@count" )>=0, "Count(*) or @count is queried, but not available in the schema" );

#undef LOC_CHECK

	return true;
}
// For KNN queries, validate the KNN attribute (exists, KNN-indexed, matching
// vector dimensions) and add a float distance attribute computed at presort.
// Returns false with m_sError set on validation failure.
bool QueueCreator_c::AddKNNDistColumn()
{
	// no KNN clause, or distance attr already present
	if ( m_tQuery.m_sKNNAttr.IsEmpty() || m_pSorterSchema->GetAttrIndex ( GetKnnDistAttrName() )>=0 )
		return true;

	auto pAttr = m_pSorterSchema->GetAttr ( m_tQuery.m_sKNNAttr.cstr() );
	if ( !pAttr )
	{
		m_sError.SetSprintf ( "requested KNN search attribute '%s' not found", m_tQuery.m_sKNNAttr.cstr() );
		return false;
	}

	if ( !pAttr->IsIndexedKNN() )
	{
		m_sError.SetSprintf ( "KNN index not enabled for attribute '%s'", m_tQuery.m_sKNNAttr.cstr() );
		return false;
	}

	// the query vector must match the indexed vector dimensions
	if ( pAttr->m_tKNN.m_iDims!=m_tQuery.m_dKNNVec.GetLength() )
	{
		m_sError.SetSprintf ( "KNN index '%s' requires a vector of %d entries; %d entries specified", m_tQuery.m_sKNNAttr.cstr(), pAttr->m_tKNN.m_iDims, m_tQuery.m_dKNNVec.GetLength() );
		return false;
	}

	CSphColumnInfo tKNNDist ( GetKnnDistAttrName(), SPH_ATTR_FLOAT );
	tKNNDist.m_eStage = SPH_EVAL_PRESORT;
	tKNNDist.m_pExpr = CreateExpr_KNNDist ( m_tQuery.m_dKNNVec, *pAttr );

	m_pSorterSchema->AddAttr ( tKNNDist, true );
	m_hQueryColumns.Add ( tKNNDist.m_sName );

	return true;
}
// Parse sExpr against the sorter schema into tExprCol (named sAttr), marking
// the result as a presort-stage joined attribute.
// Returns false if the expression fails to parse (m_sError is set).
bool QueueCreator_c::ParseJoinExpr ( CSphColumnInfo & tExprCol, const CSphString & sAttr, const CSphString & sExpr ) const
{
	tExprCol = CSphColumnInfo ( sAttr.cstr() );
	tExprCol.m_eStage = SPH_EVAL_PRESORT;

	ExprParseArgs_t tArgs;
	tArgs.m_pAttrType = &tExprCol.m_eAttrType;
	tArgs.m_pProfiler = m_tSettings.m_pProfiler;
	tArgs.m_eCollation = m_tQuery.m_eCollation;

	const CSphString * pJoinedIdx = m_tSettings.m_pJoinArgs ? &(m_tSettings.m_pJoinArgs->m_sIndex2) : nullptr;
	tExprCol.m_pExpr = sphExprParse ( sExpr.cstr(), *m_pSorterSchema, pJoinedIdx, m_sError, tArgs );
	tExprCol.m_uAttrFlags |= CSphColumnInfo::ATTR_JOINED;

	return tExprCol.m_pExpr!=nullptr;
}
// Make the left-side JOIN ON key (sAttr1) available by presort stage.
// Existing attrs just get their stage clamped; JSON paths are parsed into a
// new expression, converted to the type of the right-side key (sAttr2) or to
// the explicit eTypeCast. Returns false with m_sError set on failure.
bool QueueCreator_c::AddJsonJoinOnFilter ( const CSphString & sAttr1, const CSphString & sAttr2, ESphAttr eTypeCast )
{
	const CSphColumnInfo * pAttr = m_pSorterSchema->GetAttr ( sAttr1.cstr() );
	if ( pAttr )
	{
		// docstore-backed expressions are left alone; others just need to be
		// evaluated no later than presort
		if ( pAttr->m_pExpr && pAttr->m_pExpr->UsesDocstore() )
			return true;

		const_cast<CSphColumnInfo *>(pAttr)->m_eStage = Min ( pAttr->m_eStage, SPH_EVAL_PRESORT );
		return true;
	}

	// not in the schema and not a JSON path: cannot join on it
	if ( !sphJsonNameSplit ( sAttr1.cstr(), nullptr ) )
	{
		const CSphColumnInfo * pField = m_pSorterSchema->GetField ( sAttr1.cstr() );
		if ( pField && ( pField->m_uFieldFlags & CSphColumnInfo::FIELD_STORED ) )
			m_sError.SetSprintf ( "Unable to perform join on a stored field '%s.%s'", m_tSettings.m_pJoinArgs->m_sIndex1.cstr(), sAttr1.cstr() );
		else
			m_sError.SetSprintf ( "Unable to perform join on '%s'", sAttr1.cstr() );

		return false;
	}

	CSphColumnInfo tExprCol;
	if ( !ParseJoinExpr ( tExprCol, sAttr1, sAttr1 ) )
		return false;

	const auto & tSchema = m_tSettings.m_pJoinArgs->m_tJoinedSchema;

	// convert JSON fields to join attr type
	if ( tExprCol.m_eAttrType==SPH_ATTR_JSON_FIELD )
	{
		// try to determine type if it was not explicitly specified
		if ( eTypeCast==SPH_ATTR_NONE )
		{
			auto * pJoinAttr = tSchema.GetAttr ( sAttr2.cstr() );
			if ( !pJoinAttr )
			{
				// JSON vs JSON without an explicit cast is ambiguous
				if ( sphJsonNameSplit ( sAttr2.cstr() ) )
					m_sError.SetSprintf ( "use implicit type conversion on join-on attribute '%s'", sAttr2.cstr() );
				else
					m_sError.SetSprintf ( "join-on attribute '%s' not found", sAttr2.cstr() );

				return false;
			}

			eTypeCast = pJoinAttr->m_eAttrType;
		}

		// re-parse wrapped in the appropriate conversion function
		CSphString sConverted;
		switch ( eTypeCast )
		{
		case SPH_ATTR_STRING:
			sConverted.SetSprintf ( "TO_STRING(%s)", sAttr1.cstr() );
			break;

		case SPH_ATTR_FLOAT:
			sConverted.SetSprintf ( "DOUBLE(%s)", sAttr1.cstr() );
			break;

		default:
			sConverted.SetSprintf ( "BIGINT(%s)", sAttr1.cstr() );
			break;
		}

		if ( !ParseJoinExpr ( tExprCol, sAttr1, sConverted ) )
			return false;
	}

	m_pSorterSchema->AddAttr ( tExprCol, true );
	return true;
}
bool QueueCreator_c::AddColumnarJoinOnFilter ( const CSphString & sAttr )
{
const CSphColumnInfo * pAttr = m_pSorterSchema->GetAttr ( sAttr.cstr() );
if ( pAttr && pAttr->m_pExpr && !pAttr->m_pExpr->UsesDocstore() )
{
const_cast<CSphColumnInfo *>(pAttr)->m_eStage = Min ( pAttr->m_eStage, SPH_EVAL_PRESORT );
return true;
}
return ReplaceWithColumnarItem ( sAttr, SPH_EVAL_PRESORT );
}
// Mirror all non-internal attrs (and stored fields, as strings) of the joined
// table into the sorter schema, prefixed with "<right_table>." and flagged as
// joined sorter-stage attributes.
bool QueueCreator_c::AddJoinAttrs()
{
	if ( !m_tSettings.m_pJoinArgs )
		return true;

	const auto & tSchema = m_tSettings.m_pJoinArgs->m_tJoinedSchema;
	for ( int i = 0; i < tSchema.GetAttrsCount(); i++ )
		if ( !sphIsInternalAttr ( tSchema.GetAttr(i).m_sName ) )
		{
			CSphColumnInfo tAttr = tSchema.GetAttr(i);
			tAttr.m_sName.SetSprintf ( "%s.%s", m_tSettings.m_pJoinArgs->m_sIndex2.cstr(), tAttr.m_sName.cstr() );
			// joined values arrive as ptr attrs filled by the join sorter,
			// so drop columnar traits and the original locator
			tAttr.m_eAttrType = sphPlainAttrToPtrAttr ( tAttr.m_eAttrType );
			tAttr.m_tLocator.Reset();
			tAttr.m_eStage = SPH_EVAL_SORTER;
			tAttr.m_uAttrFlags &= ~( CSphColumnInfo::ATTR_COLUMNAR | CSphColumnInfo::ATTR_COLUMNAR_HASHES );
			tAttr.m_uAttrFlags |= CSphColumnInfo::ATTR_JOINED;
			m_pSorterSchema->AddAttr ( tAttr, true );
			m_hQueryDups.Add ( tAttr.m_sName );
			m_hQueryColumns.Add ( tAttr.m_sName );
		}

	// stored fields of the joined table become string attrs
	for ( int i = 0; i < tSchema.GetFieldsCount(); i++ )
	{
		const CSphColumnInfo & tField = tSchema.GetField(i);
		if ( tField.m_uFieldFlags & CSphColumnInfo::FIELD_STORED )
		{
			CSphColumnInfo tAttr;
			tAttr.m_sName.SetSprintf ( "%s.%s", m_tSettings.m_pJoinArgs->m_sIndex2.cstr(), tField.m_sName.cstr() );
			tAttr.m_eAttrType = SPH_ATTR_STRINGPTR;
			tAttr.m_tLocator.Reset();
			tAttr.m_eStage = SPH_EVAL_SORTER;
			tAttr.m_uAttrFlags = CSphColumnInfo::ATTR_JOINED;
			m_pSorterSchema->AddAttr ( tAttr, true );
			m_hQueryDups.Add ( tAttr.m_sName );
			m_hQueryColumns.Add ( tAttr.m_sName );
		}
	}

	return true;
}
// Map a filter type to the attribute type used to evaluate it: float-range
// filters need floats, string filters need string ptrs, anything else is
// treated as a 64-bit integer.
static ESphAttr FilterType2AttrType ( ESphFilter eFilter )
{
	if ( eFilter==SPH_FILTER_FLOATRANGE )
		return SPH_ATTR_FLOAT;

	if ( eFilter==SPH_FILTER_STRING || eFilter==SPH_FILTER_STRING_LIST )
		return SPH_ATTR_STRINGPTR;

	return SPH_ATTR_BIGINT;
}
// Explicit type casts in JOIN ON clauses are only meaningful on JSON paths;
// reject a cast applied to a plain attribute.
bool QueueCreator_c::CheckJoinOnTypeCast ( const CSphString & sIdx, const CSphString & sAttr, ESphAttr eTypeCast )
{
	// no cast requested — nothing to validate
	if ( eTypeCast==SPH_ATTR_NONE )
		return true;

	// JSON path — cast is allowed
	if ( sphJsonNameSplit ( sAttr.cstr() ) )
		return true;

	m_sError.SetSprintf ( "Explicit type conversion used on non-json attribute '%s.%s'", sIdx.cstr(), sAttr.cstr() );
	return false;
}
// Prepare attributes referenced by JOIN ON conditions (and, when mixed join
// filters must run in the join sorter, by WHERE filters on joined JSON paths)
// so that they exist in the sorter schema at the right eval stage.
bool QueueCreator_c::AddJoinFilterAttrs()
{
	if ( !m_tSettings.m_pJoinArgs )
		return true;

	const CSphString & sLeftIndex = m_tSettings.m_pJoinArgs->m_sIndex1;
	const CSphString & sRightIndex = m_tSettings.m_pJoinArgs->m_sIndex2;

	for ( const auto & i : m_tQuery.m_dOnFilters )
	{
		// explicit casts are only valid on JSON paths
		if ( !CheckJoinOnTypeCast ( i.m_sIdx1, i.m_sAttr1, i.m_eTypeCast1 ) ) return false;
		if ( !CheckJoinOnTypeCast ( i.m_sIdx2, i.m_sAttr2, i.m_eTypeCast2 ) ) return false;

		ESphAttr eTypeCast = i.m_eTypeCast1!=SPH_ATTR_NONE ? i.m_eTypeCast1 : i.m_eTypeCast2;

		// process whichever side of the ON condition refers to the left table
		if ( i.m_sIdx1==sLeftIndex )
		{
			if ( !AddJsonJoinOnFilter ( i.m_sAttr1, i.m_sAttr2, eTypeCast ) ) return false;
			if ( !AddColumnarJoinOnFilter ( i.m_sAttr1 ) ) return false;
		}

		if ( i.m_sIdx2==sLeftIndex )
		{
			if ( !AddJsonJoinOnFilter ( i.m_sAttr2, i.m_sAttr1, eTypeCast ) ) return false;
			if ( !AddColumnarJoinOnFilter ( i.m_sAttr2 ) ) return false;
		}
	}

	// when mixed join filters are evaluated in the join sorter, WHERE filters on
	// joined JSON paths need placeholder attrs of the filter's comparison type
	if ( NeedToMoveMixedJoinFilters ( m_tQuery, *m_pSorterSchema ) )
		for ( const auto & i : m_tQuery.m_dFilters )
		{
			const CSphString & sAttr = i.m_sAttrName;
			const CSphColumnInfo * pAttr = m_pSorterSchema->GetAttr ( sAttr.cstr() );
			if ( pAttr || !sphJsonNameSplit ( sAttr.cstr(), sRightIndex.cstr() ) )
				continue;

			CSphColumnInfo tExprCol ( sAttr.cstr(), FilterType2AttrType ( i.m_eType ) );
			tExprCol.m_eStage = SPH_EVAL_SORTER;
			tExprCol.m_uAttrFlags |= CSphColumnInfo::ATTR_JOINED;
			m_pSorterSchema->AddAttr ( tExprCol, true );
			m_hQueryDups.Add(sAttr);
			m_hQueryColumns.Add(sAttr);
		}

	return true;
}
// For LEFT JOIN queries, add a null-mask attribute sized to cover all joined
// dynamic attrs; unmatched right-side rows are marked as NULL through it.
bool QueueCreator_c::AddNullBitmask()
{
	if ( !m_tSettings.m_pJoinArgs || m_tQuery.m_eJoinType!=JoinType_e::LEFT )
		return true;

	// the mask must span up to the highest dynamic slot occupied by a joined attr
	int iNumJoinAttrs = 0;
	int iDynamic = 0;
	for ( int i = 0; i < m_pSorterSchema->GetAttrsCount(); i++ )
	{
		const auto & tAttr = m_pSorterSchema->GetAttr(i);
		if ( !tAttr.m_tLocator.m_bDynamic )
			continue;

		iDynamic++;
		if ( tAttr.IsJoined() )
			iNumJoinAttrs = Max ( iNumJoinAttrs, iDynamic );
	}

	CSphColumnInfo tAttr ( GetNullMaskAttrName(), DetermineNullMaskType(iNumJoinAttrs) );
	tAttr.m_eStage = SPH_EVAL_SORTER;
	m_pSorterSchema->AddAttr ( tAttr, true );
	m_hQueryDups.Add ( tAttr.m_sName );
	m_hQueryColumns.Add ( tAttr.m_sName );

	return true;
}
// Validate the HAVING clause: it requires GROUP BY, and the filtered column
// must be group-by magic or a select-list alias of an aggregate (or group-by
// magic) expression.
bool QueueCreator_c::CheckHavingConstraints () const
{
	const auto * pAggrFilter = m_tSettings.m_pAggrFilter;
	if ( !pAggrFilter || pAggrFilter->m_sAttrName.IsEmpty () )
		return true;

	if ( !m_bGotGroupby )
		return Err ( "can not use HAVING without GROUP BY" );

	// should be column named at group by, or it's alias or aggregate
	const CSphString & sHaving = pAggrFilter->m_sAttrName;
	if ( IsGroupbyMagic ( sHaving ) )
		return true;

	// look for a select-list item aliased to the HAVING column
	bool bValidHaving = false;
	for ( const CSphQueryItem & tItem : m_tQuery.m_dItems )
		if ( tItem.m_sAlias==sHaving )
		{
			bValidHaving = IsGroupbyMagic ( tItem.m_sExpr ) || tItem.m_eAggrFunc!=SPH_AGGR_NONE;
			break;
		}

	if ( !bValidHaving )
		return Err ( "can not use HAVING with attribute not related to GROUP BY" );

	return true;
}
// Configure a presort remap column for a JSON sort key. A zero key mask marks
// a function-style key (keep its expression and native type); otherwise sort
// the JSON field as a string via an on-the-fly conversion expression.
void QueueCreator_c::SetupRemapColJson ( CSphColumnInfo & tRemapCol, CSphMatchComparatorState & tState, CSphVector<ExtraSortExpr_t> & dExtraExprs, int iStateAttr )
{
	ExtraSortExpr_t & tExtra = dExtraExprs[iStateAttr];
	bool bIsFunc = ( tExtra.m_tKey.m_uMask==0 );

	tRemapCol.m_eStage = SPH_EVAL_PRESORT;

	if ( !bIsFunc )
	{
		// plain JSON field: compare as a converted string
		tRemapCol.m_pExpr = CreateExprSortJson2String ( tState.m_tLocator[iStateAttr], tExtra.m_pExpr );
		return;
	}

	// function key: reuse its expression, keep its type, fix up the keypart
	tRemapCol.m_pExpr = tExtra.m_pExpr;
	tRemapCol.m_eAttrType = tExtra.m_eType;
	tState.m_eKeypart[iStateAttr] = Attr2Keypart ( tRemapCol.m_eAttrType );
}
// When sorting on the pre-remap "@groupby" attr and the (first) group-by
// column is a string, return that column; otherwise return nullptr.
const CSphColumnInfo * QueueCreator_c::GetGroupbyStr ( int iAttr, int iNumOldAttrs ) const
{
	assert ( m_pSorterSchema );
	const auto & tSchema = *m_pSorterSchema;

	bool bCandidate = m_tSettings.m_bComputeItems
		&& iAttr>=0
		&& iAttr<iNumOldAttrs
		&& m_dGroupColumns.GetLength()
		&& tSchema.GetAttr(iAttr).m_sName=="@groupby";

	if ( !bCandidate )
		return nullptr;

	// FIXME!!! add support of multi group by
	const CSphColumnInfo & tGroupCol = tSchema.GetAttr ( m_dGroupColumns[0].first );
	bool bStringGroupby = tGroupCol.m_eAttrType==SPH_ATTR_STRING || tGroupCol.m_eAttrType==SPH_ATTR_STRINGPTR;
	return bStringGroupby ? &tGroupCol : nullptr;
}
// When the sort clause references "@groupby" and the group key is a string,
// rewire the comparator state to compare the string itself: columnar strings
// get their stage bumped to presort, plain strings get an internal fixup expr.
void QueueCreator_c::ReplaceGroupbyStrWithExprs ( CSphMatchComparatorState & tState, int iNumOldAttrs )
{
	assert ( m_pSorterSchema );
	auto & tSorterSchema = *m_pSorterSchema;

	for ( int i = 0; i<CSphMatchComparatorState::MAX_ATTRS; i++ )
	{
		const CSphColumnInfo * pGroupStrBase = GetGroupbyStr ( tState.m_dAttrs[i], iNumOldAttrs );
		if ( !pGroupStrBase )
			continue;

		assert ( tState.m_dAttrs[i]>=0 && tState.m_dAttrs[i]<iNumOldAttrs );

		int iRemap = -1;
		if ( pGroupStrBase->m_eAttrType==SPH_ATTR_STRINGPTR )
		{
			// grouping by (columnar) string; and the same string is used in sorting
			// correct the locator and change the evaluation stage to PRESORT
			iRemap = tSorterSchema.GetAttrIndex ( pGroupStrBase->m_sName.cstr() );
			assert ( iRemap>=0 );
			const CSphColumnInfo & tAttr = tSorterSchema.GetAttr(iRemap);
			const_cast<CSphColumnInfo &>(tAttr).m_eStage = SPH_EVAL_PRESORT;
		}
		else if ( !pGroupStrBase->IsColumnar() )
		{
			// plain static string: add (or reuse) an internal "@int_attr_*" remap
			// column with a string-fixup expression
			CSphString sRemapCol;
			sRemapCol.SetSprintf ( "%s%s", GetInternalAttrPrefix(), pGroupStrBase->m_sName.cstr() );
			iRemap = tSorterSchema.GetAttrIndex ( sRemapCol.cstr() );

			if ( iRemap==-1 )
			{
				CSphColumnInfo tRemapCol ( sRemapCol.cstr(), SPH_ATTR_STRINGPTR );
				tRemapCol.m_pExpr = CreateExprSortStringFixup ( pGroupStrBase->m_tLocator );
				tRemapCol.m_eStage = SPH_EVAL_PRESORT;
				iRemap = tSorterSchema.GetAttrsCount();
				tSorterSchema.AddAttr ( tRemapCol, true );
			}
		}

		// point the comparator slot at the remapped string attr
		if ( iRemap!=-1 )
		{
			tState.m_eKeypart[i] = SPH_KEYPART_STRINGPTR;
			tState.m_tLocator[i] = tSorterSchema.GetAttr(iRemap).m_tLocator;
			tState.m_dAttrs[i] = iRemap;
			tState.m_dRemapped.BitSet ( i );
		}
	}
}
// Replace static-string sort keys with presort string-ptr expressions:
// columnar strings become fetch expressions under the same name, plain static
// strings get an internal "@int_attr_*" fixup column.
void QueueCreator_c::ReplaceStaticStringsWithExprs ( CSphMatchComparatorState & tState )
{
	assert ( m_pSorterSchema );
	auto & tSorterSchema = *m_pSorterSchema;

	for ( int i = 0; i<CSphMatchComparatorState::MAX_ATTRS; i++ )
	{
		if ( tState.m_dRemapped.BitGet ( i ) )
			continue;

		if ( tState.m_eKeypart[i]!=SPH_KEYPART_STRING )
			continue;

		int iRemap = -1;
		int iAttrId = tState.m_dAttrs[i];
		const CSphColumnInfo & tAttr = tSorterSchema.GetAttr(iAttrId);
		if ( tAttr.IsColumnar() )
		{
			// swap the columnar attr for a same-named string-fetch expression
			CSphString sAttrName = tAttr.m_sName;
			tSorterSchema.RemoveStaticAttr(iAttrId);

			CSphColumnInfo tRemapCol ( sAttrName.cstr(), SPH_ATTR_STRINGPTR );
			tRemapCol.m_eStage = SPH_EVAL_PRESORT;
			tRemapCol.m_pExpr = CreateExpr_GetColumnarString ( sAttrName, tAttr.m_uAttrFlags & CSphColumnInfo::ATTR_STORED );

			tSorterSchema.AddAttr ( tRemapCol, true );
			iRemap = tSorterSchema.GetAttrIndex ( sAttrName.cstr() );
		}
		else
		{
			// add (or reuse) an internal fixup column for the static string
			CSphString sRemapCol;
			sRemapCol.SetSprintf ( "%s%s", GetInternalAttrPrefix(), tSorterSchema.GetAttr(iAttrId).m_sName.cstr() );
			iRemap = tSorterSchema.GetAttrIndex ( sRemapCol.cstr() );

			if ( iRemap==-1 )
			{
				CSphColumnInfo tRemapCol ( sRemapCol.cstr(), SPH_ATTR_STRINGPTR );
				tRemapCol.m_eStage = SPH_EVAL_PRESORT;
				tRemapCol.m_pExpr = CreateExprSortStringFixup ( tState.m_tLocator[i] );
				iRemap = tSorterSchema.GetAttrsCount();
				tSorterSchema.AddAttr ( tRemapCol, true );
			}
		}

		// retarget the comparator slot at the string-ptr remap
		tState.m_tLocator[i] = tSorterSchema.GetAttr ( iRemap ).m_tLocator;
		tState.m_dAttrs[i] = iRemap;
		tState.m_eKeypart[i] = SPH_KEYPART_STRINGPTR;
		tState.m_dRemapped.BitSet ( i );
	}
}
// For sort keys that are JSON paths, add (or reuse) internal "@int_attr_*"
// remap columns backed by the parsed JSON expressions and retarget the
// comparator state at them.
void QueueCreator_c::ReplaceJsonWithExprs ( CSphMatchComparatorState & tState, CSphVector<ExtraSortExpr_t> & dExtraExprs )
{
	assert ( m_pSorterSchema );
	auto & tSorterSchema = *m_pSorterSchema;

	for ( int i = 0; i<CSphMatchComparatorState::MAX_ATTRS; i++ )
	{
		if ( tState.m_dRemapped.BitGet ( i ) )
			continue;

		// an empty key means this slot is not a JSON sort key
		const CSphString & sKey = dExtraExprs[i].m_tKey.m_sKey;
		if ( sKey.IsEmpty() )
			continue;

		CSphString sRemapCol;
		sRemapCol.SetSprintf ( "%s%s", GetInternalAttrPrefix(), sKey.cstr() );
		int iRemap = tSorterSchema.GetAttrIndex ( sRemapCol.cstr() );

		// also try a lowercased name before giving up on reuse
		if ( iRemap==-1 )
		{
			CSphString sRemapLowercase = sRemapCol;
			sRemapLowercase.ToLower();
			iRemap = tSorterSchema.GetAttrIndex ( sRemapLowercase.cstr() );
		}

		if ( iRemap==-1 )
		{
			CSphColumnInfo tRemapCol ( sRemapCol.cstr(), SPH_ATTR_STRINGPTR );
			SetupRemapColJson ( tRemapCol, tState, dExtraExprs, i );
			iRemap = tSorterSchema.GetAttrsCount();
			ModifyExprForJoin ( tRemapCol, sKey );
			tSorterSchema.AddAttr ( tRemapCol, true );
		}

		tState.m_tLocator[i] = tSorterSchema.GetAttr(iRemap).m_tLocator;
		tState.m_dAttrs[i] = iRemap;
		tState.m_dRemapped.BitSet ( i );
	}
}
// Materialize columnar sort expressions as presort attributes in the sorter
// schema and retarget the comparator state at the new attrs.
void QueueCreator_c::AddColumnarExprsAsAttrs ( CSphMatchComparatorState & tState, CSphVector<ExtraSortExpr_t> & dExtraExprs )
{
	assert ( m_pSorterSchema );
	auto & tSorterSchema = *m_pSorterSchema;

	for ( int i = 0; i<CSphMatchComparatorState::MAX_ATTRS; i++ )
	{
		if ( tState.m_dRemapped.BitGet ( i ) )
			continue;

		// only columnar expressions are handled here
		ISphExpr * pExpr = dExtraExprs[i].m_pExpr;
		if ( !pExpr || !pExpr->IsColumnar() )
			continue;

		const CSphString & sAttrName = tSorterSchema.GetAttr ( tState.m_dAttrs[i] ).m_sName;
		CSphColumnInfo tRemapCol ( sAttrName.cstr(), dExtraExprs[i].m_eType );
		tRemapCol.m_eStage = SPH_EVAL_PRESORT;
		tRemapCol.m_pExpr = pExpr;
		tRemapCol.m_pExpr->AddRef();	// the schema holds its own reference

		int iRemap = tSorterSchema.GetAttrsCount();
		tSorterSchema.AddAttr ( tRemapCol, true );

		// remove initial attribute from m_hExtra
		// that way it won't be evaluated twice when it is not in select list
		m_hExtra.Delete(sAttrName);

		tState.m_tLocator[i] = tSorterSchema.GetAttr ( iRemap ).m_tLocator;
		tState.m_dAttrs[i] = iRemap;
		tState.m_eKeypart[i] = Attr2Keypart ( dExtraExprs[i].m_eType );
		tState.m_dRemapped.BitSet ( i );
	}
}
// Run all comparator-state remap passes (group-by strings, static strings,
// JSON keys, columnar expressions) and re-register the sort keys if any new
// attrs were added to the schema in the process.
void QueueCreator_c::RemapAttrs ( CSphMatchComparatorState & tState, CSphVector<ExtraSortExpr_t> & dExtraExprs )
{
	// we have extra attrs (expressions) that we created while parsing the sort clause
	// we couldn't add them to the schema at that stage,
	// but now we can. we create attributes, assign internal names and set their expressions
	assert ( m_pSorterSchema );
	auto & tSorterSchema = *m_pSorterSchema;
	int iNumOldAttrs = tSorterSchema.GetAttrsCount();

	ReplaceGroupbyStrWithExprs ( tState, iNumOldAttrs );
	ReplaceStaticStringsWithExprs ( tState );
	ReplaceJsonWithExprs ( tState, dExtraExprs );
	AddColumnarExprsAsAttrs ( tState, dExtraExprs );

	// need another sort keys add after setup remap
	if ( iNumOldAttrs!=tSorterSchema.GetAttrsCount() )
		ExtraAddSortkeys ( tState.m_dAttrs );
}
// For KNN queries, prepend "knn_dist() asc" to the sort clause unless it
// already mentions knn_dist explicitly.
void QueueCreator_c::AddKnnDistSort ( CSphString & sSortBy )
{
	bool bKnnQuery = !!m_pSorterSchema->GetAttr ( GetKnnDistAttrName() );
	bool bAlreadyMentioned = strstr ( sSortBy.cstr(), "knn_dist" )!=nullptr;
	if ( bKnnQuery && !bAlreadyMentioned )
		sSortBy.SetSprintf ( "knn_dist() asc, %s", sSortBy.cstr() );
}
// matches sorting function
// Select the match comparator (m_eMatchFunc) and fill m_tStateMatch according
// to the query's sort mode. Returns false with m_sError set on bad sort specs.
bool QueueCreator_c::SetupMatchesSortingFunc()
{
	m_bRandomize = false;

	// extended ("ORDER BY expr, ...") clause
	if ( m_tQuery.m_eSort==SPH_SORT_EXTENDED )
	{
		CSphString sSortBy = m_tQuery.m_sSortBy;
		AddKnnDistSort ( sSortBy );

		ESortClauseParseResult eRes = sphParseSortClause ( m_tQuery, sSortBy.cstr(), *m_pSorterSchema, m_eMatchFunc, m_tStateMatch, m_dMatchJsonExprs, m_tSettings.m_bComputeItems, m_tSettings.m_pJoinArgs.get(), m_sError );
		if ( eRes==SORT_CLAUSE_ERROR )
			return false;

		if ( eRes==SORT_CLAUSE_RANDOM )
			m_bRandomize = true;

		ExtraAddSortkeys ( m_tStateMatch.m_dAttrs );
		AssignOrderByToPresortStage ( m_tStateMatch.m_dAttrs, CSphMatchComparatorState::MAX_ATTRS );
		RemapAttrs ( m_tStateMatch, m_dMatchJsonExprs );
		return true;
	}

	// single-expression sort: key 0 is @expr, key 1 breaks ties by rowid
	if ( m_tQuery.m_eSort==SPH_SORT_EXPR )
	{
		m_tStateMatch.m_eKeypart[0] = SPH_KEYPART_INT;
		m_tStateMatch.m_tLocator[0] = m_pSorterSchema->GetAttr ( m_pSorterSchema->GetAttrIndex ( "@expr" ) ).m_tLocator;
		m_tStateMatch.m_eKeypart[1] = SPH_KEYPART_ROWID;
		m_tStateMatch.m_uAttrDesc = 1;
		m_eMatchFunc = FUNC_EXPR;
		return true;
	}

	// check sort-by attribute
	if ( m_tQuery.m_eSort!=SPH_SORT_RELEVANCE )
	{
		int iSortAttr = m_pSorterSchema->GetAttrIndex ( m_tQuery.m_sSortBy.cstr() );
		if ( iSortAttr<0 )
		{
			Err ( "sort-by attribute '%s' not found", m_tQuery.m_sSortBy.cstr() );
			return false;
		}

		const CSphColumnInfo & tAttr = m_pSorterSchema->GetAttr ( iSortAttr );
		m_tStateMatch.m_eKeypart[0] = Attr2Keypart ( tAttr.m_eAttrType );
		m_tStateMatch.m_tLocator[0] = tAttr.m_tLocator;
		m_tStateMatch.m_dAttrs[0] = iSortAttr;
		RemapAttrs ( m_tStateMatch, m_dMatchJsonExprs );
	}

	ExtraAddSortkeys ( m_tStateMatch.m_dAttrs );

	// find out what function to use and whether it needs attributes
	switch (m_tQuery.m_eSort )
	{
		case SPH_SORT_TIME_SEGMENTS:	m_eMatchFunc = FUNC_TIMESEGS; break;
		case SPH_SORT_RELEVANCE:		m_eMatchFunc = FUNC_REL_DESC; break;
		default:
			Err ( "unknown sorting mode %d", m_tQuery.m_eSort );
			return false;
	}

	return true;
}
// Parse the WITHIN GROUP ORDER BY clause, register group-by/distinct columns as
// extras, and push the resulting sort keys to the presort stage.
bool QueueCreator_c::SetupGroupSortingFunc ( bool bGotDistinct )
{
	assert ( m_bGotGroupby );

	// "@weight desc" is the default group order; only then inject the knn distance sort
	CSphString sGroupOrderBy = m_tQuery.m_sGroupSortBy;
	if ( sGroupOrderBy=="@weight desc" )
		AddKnnDistSort ( sGroupOrderBy );

	ESortClauseParseResult eRes = sphParseSortClause ( m_tQuery, sGroupOrderBy.cstr(), *m_pSorterSchema, m_eGroupFunc, m_tStateGroup, m_dGroupJsonExprs, m_tSettings.m_bComputeItems, m_tSettings.m_pJoinArgs.get(), m_sError );
	if ( eRes==SORT_CLAUSE_ERROR || eRes==SORT_CLAUSE_RANDOM )
	{
		if ( eRes==SORT_CLAUSE_RANDOM )
			m_sError = "groups can not be sorted by @random";
		return false;
	}

	ExtraAddSortkeys ( m_tStateGroup.m_dAttrs );

	if ( !m_tGroupSorterSettings.m_bImplicit )
	{
		for ( const auto & tGroupColumn : m_dGroupColumns )
			m_hExtra.Add ( m_pSorterSchema->GetAttr ( tGroupColumn.first ).m_sName );
	}

	if ( bGotDistinct )
	{
		m_dGroupColumns.Add ( { m_pSorterSchema->GetAttrIndex ( m_tQuery.m_sGroupDistinct.cstr() ), true } );
		assert ( m_dGroupColumns.Last().first>=0 );
		m_hExtra.Add ( m_pSorterSchema->GetAttr ( m_dGroupColumns.Last().first ).m_sName );
	}

	// implicit case
	// only entries flagged 'true' (second) are actual group-by columns
	CSphVector<int> dGroupByCols;
	for ( const auto & i : m_dGroupColumns )
		if ( i.second )
			dGroupByCols.Add ( i.first );

	AssignOrderByToPresortStage ( dGroupByCols.Begin(), dGroupByCols.GetLength() );
	AssignOrderByToPresortStage ( m_tStateGroup.m_dAttrs, CSphMatchComparatorState::MAX_ATTRS );

	// GroupSortBy str attributes setup
	RemapAttrs ( m_tStateGroup, m_dGroupJsonExprs );
	return true;
}
// set up aggregate filter for grouper
// Builds the HAVING filter. If the filtered name is not in the sorter schema, it was
// an alias from the select list - resolve it back to its expression (or magic name).
std::unique_ptr<ISphFilter> QueueCreator_c::CreateAggrFilter () const
{
	assert ( m_bGotGroupby );
	if ( m_pSorterSchema->GetAttr ( m_tSettings.m_pAggrFilter->m_sAttrName.cstr() ) )
		return sphCreateAggrFilter ( m_tSettings.m_pAggrFilter, m_tSettings.m_pAggrFilter->m_sAttrName, *m_pSorterSchema, m_sError );

	// having might reference aliased attributes but @* attributes got stored without alias in sorter schema
	CSphString sHaving;
	for ( const auto & tItem : m_tQuery.m_dItems )
		if ( tItem.m_sAlias==m_tSettings.m_pAggrFilter->m_sAttrName )
		{
			sHaving = tItem.m_sExpr;
			break;
		}

	// map well-known pseudo-functions onto their internal magic attribute names
	if ( sHaving=="groupby()" )
		sHaving = "@groupby";
	else if ( sHaving=="count(*)" )
		sHaving = "@count";

	return sphCreateAggrFilter ( m_tSettings.m_pAggrFilter, sHaving, *m_pSorterSchema, m_sError );
}
void QueueCreator_c::SetupCollation()
{
SphStringCmp_fn fnCmp = GetStringCmpFunc ( m_tQuery.m_eCollation );
m_tStateMatch.m_fnStrCmp = fnCmp;
m_tStateGroup.m_fnStrCmp = fnCmp;
}
// Decide whether this query groups (explicitly or implicitly via aggregates),
// configure the grouping settings and add the @groupby/@count/@distinct magic columns.
bool QueueCreator_c::AddGroupbyStuff ()
{
	// need schema with group related columns however not need grouper
	m_bHeadWOGroup = ( m_tQuery.m_sGroupBy.IsEmpty () && m_tQuery.m_bFacetHead );
	auto fnIsImplicit = [] ( const CSphQueryItem & t )
	{
		return ( t.m_eAggrFunc!=SPH_AGGR_NONE ) || t.m_sExpr=="count(*)" || t.m_sExpr=="@distinct";
	};

	bool bHasImplicitGrouping = HasImplicitGrouping(m_tQuery);

	// count(*) and distinct wo group by at main query should keep implicit flag
	if ( bHasImplicitGrouping && m_bHeadWOGroup )
		m_bHeadWOGroup = !m_tQuery.m_dRefItems.any_of ( fnIsImplicit );

	if ( !SetupGroupbySettings(bHasImplicitGrouping) )
		return false;

	// or else, check in SetupGroupbySettings() would already fail
	m_bGotGroupby = !m_tQuery.m_sGroupBy.IsEmpty () || m_tGroupSorterSettings.m_bImplicit;
	m_bGotDistinct = !!m_tGroupSorterSettings.m_pDistinctFetcher;

	if ( m_bHasGroupByExpr && !m_bGotGroupby )
		return Err ( "GROUPBY() is allowed only in GROUP BY queries" );

	// check for HAVING constrains
	if ( !CheckHavingConstraints() )
		return false;

	// now let's add @groupby stuff, if necessary
	return MaybeAddGroupbyMagic(m_bGotDistinct);
}
// Finish group-mode setup: group sort clause, optional HAVING filter, distinct
// accuracy, and publish the collected extra columns to m_hQueryColumns/m_pExtra.
bool QueueCreator_c::SetGroupSorting()
{
	if ( m_bGotGroupby )
	{
		if ( !SetupGroupSortingFunc ( m_bGotDistinct ) )
			return false;

		if ( m_tSettings.m_pAggrFilter && !m_tSettings.m_pAggrFilter->m_sAttrName.IsEmpty() )
		{
			auto pFilter = CreateAggrFilter ();
			if ( !pFilter )
				return false;
			// sorter settings take raw ownership of the filter
			m_tGroupSorterSettings.m_pAggrFilterTrait = pFilter.release();
		}

		int iDistinctAccuracyThresh = m_tQuery.m_bExplicitDistinctThresh ? m_tQuery.m_iDistinctThresh : GetDistinctThreshDefault();
		m_tGroupSorterSettings.SetupDistinctAccuracy ( iDistinctAccuracyThresh );
	}

	for ( auto & tIdx: m_hExtra )
	{
		m_hQueryColumns.Add ( tIdx.first );
		if ( m_pExtra )
			m_pExtra->Add ( tIdx.first );
	}

	return true;
}
bool QueueCreator_c::PredictAggregates() const
{
for ( int i = 0; i < m_pSorterSchema->GetAttrsCount(); i++ )
{
const CSphColumnInfo & tAttr = m_pSorterSchema->GetAttr(i);
if ( !(tAttr.m_eAggrFunc==SPH_AGGR_NONE || IsGroupbyMagic ( tAttr.m_sName ) || IsSortStringInternal ( tAttr.m_sName.cstr () )) )
return true;
}
return false;
}
// Shrink max_matches down to limit+offset for plain (non-grouping) queries when it
// is safe to do so; never return less than 1.
int QueueCreator_c::ReduceMaxMatches() const
{
	assert ( !m_bGotGroupby );

	// keep the full window when max_matches was set explicitly, an outer select
	// needs all matches, or the items are not computed by this sorter
	bool bKeepFull = m_tQuery.m_bExplicitMaxMatches || m_tQuery.m_bHasOuter || !m_tSettings.m_bComputeItems;
	int iBound = m_tSettings.m_iMaxMatches;
	if ( !bKeepFull )
		iBound = Min ( iBound, m_tQuery.m_iLimit+m_tQuery.m_iOffset );

	return Max ( iBound, 1 );
}
// For GROUP BY: try to grow max_matches up to the known count of distinct group-by
// values (fetched via m_fnGetCountDistinct) so every group can fit in a single pass.
int QueueCreator_c::AdjustMaxMatches ( int iMaxMatches ) const
{
	assert ( m_bGotGroupby );
	if ( m_tQuery.m_bExplicitMaxMatches || m_tSettings.m_bForceSingleThread )
		return iMaxMatches;

	int iGroupbyAttr = GetGroupbyAttrIndex();
	if ( iGroupbyAttr<0 )
		return iMaxMatches;

	CSphString sModifiedAttr;
	// -1 means "count unavailable" (no callback or no stats)
	int iCountDistinct = m_tSettings.m_fnGetCountDistinct ? m_tSettings.m_fnGetCountDistinct ( m_pSorterSchema->GetAttr(iGroupbyAttr).m_sName, sModifiedAttr ) : -1;
	// too many groups: growing the window would be too costly, keep the configured limit
	if ( iCountDistinct > m_tQuery.m_iMaxMatchThresh )
		return iMaxMatches;

	return Max ( iCountDistinct, iMaxMatches );
}
// Fast COUNT(DISTINCT) path: implicit grouping with DISTINCT, no filters, no
// fulltext query, no KNN, not an inner join, and no other aggregates present.
bool QueueCreator_c::CanCalcFastCountDistinct() const
{
	if ( PredictAggregates() )
		return false;
	if ( !m_tGroupSorterSettings.m_bImplicit || !m_tGroupSorterSettings.m_bDistinct )
		return false;
	return m_tQuery.m_dFilters.IsEmpty() && m_tQuery.m_sQuery.IsEmpty() && m_tQuery.m_sKNNAttr.IsEmpty() && m_tQuery.m_eJoinType!=JoinType_e::INNER;
}
// Fast filtered-COUNT(*) path: implicit grouping without DISTINCT, exactly one
// filter, no fulltext query, no KNN, not an inner join, and no other aggregates.
bool QueueCreator_c::CanCalcFastCountFilter() const
{
	if ( PredictAggregates() )
		return false;
	if ( !m_tGroupSorterSettings.m_bImplicit || m_tGroupSorterSettings.m_bDistinct )
		return false;
	if ( m_tQuery.m_dFilters.GetLength()!=1 )
		return false;
	return m_tQuery.m_sQuery.IsEmpty() && m_tQuery.m_sKNNAttr.IsEmpty() && m_tQuery.m_eJoinType!=JoinType_e::INNER;
}
// Fast plain COUNT(*) path: implicit grouping without DISTINCT, no filters, no
// fulltext query, no KNN, not an inner join, and no other aggregates.
bool QueueCreator_c::CanCalcFastCount() const
{
	if ( PredictAggregates() )
		return false;
	if ( !m_tGroupSorterSettings.m_bImplicit || m_tGroupSorterSettings.m_bDistinct )
		return false;
	return m_tQuery.m_dFilters.IsEmpty() && m_tQuery.m_sQuery.IsEmpty() && m_tQuery.m_sKNNAttr.IsEmpty() && m_tQuery.m_eJoinType!=JoinType_e::INNER;
}
// Pull precalculated COUNT(*), COUNT(DISTINCT) and filtered-count values from index
// metadata when the respective fast paths apply; -1 in a field means "unavailable".
PrecalculatedSorterResults_t QueueCreator_c::FetchPrecalculatedValues() const
{
	PrecalculatedSorterResults_t tPrecalc;
	if ( CanCalcFastCountDistinct() )
	{
		int iCountDistinctAttr = GetGroupDistinctAttrIndex();
		// NOTE(review): '>0' also skips attr index 0 - confirm it shouldn't be '>=0'
		if ( iCountDistinctAttr>0 && m_tSettings.m_bEnableFastDistinct )
			tPrecalc.m_iCountDistinct = m_tSettings.m_fnGetCountDistinct ? m_tSettings.m_fnGetCountDistinct ( m_pSorterSchema->GetAttr(iCountDistinctAttr).m_sName, tPrecalc.m_sAttr ) : -1;
	}

	if ( CanCalcFastCountFilter() )
		tPrecalc.m_iCountFilter = m_tSettings.m_fnGetCountFilter ? m_tSettings.m_fnGetCountFilter ( m_tQuery.m_dFilters[0], tPrecalc.m_sAttr ) : -1;

	if ( CanCalcFastCount() )
		tPrecalc.m_iCount = m_tSettings.m_fnGetCount ? m_tSettings.m_fnGetCount() : -1;

	return tPrecalc;
}
// Instantiate the actual sorter: a group sorter, a direct-SQL queue, a collect
// queue, or a plain sorter (possibly wrapped into a columnar proxy).
ISphMatchSorter * QueueCreator_c::SpawnQueue()
{
	bool bNeedFactors = !!(m_uPackedFactorFlags & SPH_FACTOR_ENABLE);

	if ( m_bGotGroupby )
	{
		m_tGroupSorterSettings.m_bGrouped = m_tSettings.m_bGrouped;
		m_tGroupSorterSettings.m_iMaxMatches = AdjustMaxMatches ( m_tGroupSorterSettings.m_iMaxMatches );
		if ( m_pProfile )
			m_pProfile->m_iMaxMatches = m_tGroupSorterSettings.m_iMaxMatches;

		// some counts may come precomputed from index metadata
		PrecalculatedSorterResults_t tPrecalc = FetchPrecalculatedValues();
		return CreateSorter ( m_eMatchFunc, m_eGroupFunc, &m_tQuery, m_tGroupSorterSettings, bNeedFactors, PredictAggregates(), tPrecalc );
	}

	// unlimited select streamed straight into the SQL row buffer
	if ( m_tQuery.m_iLimit == -1 && m_tSettings.m_pSqlRowBuffer )
		return CreateDirectSqlQueue ( m_tSettings.m_pSqlRowBuffer, m_tSettings.m_ppOpaque1, m_tSettings.m_ppOpaque2, std::move (m_tSettings.m_dCreateSchema) );

	if ( m_tSettings.m_pCollection )
		return CreateCollectQueue ( m_tSettings.m_iMaxMatches, *m_tSettings.m_pCollection );

	int iMaxMatches = ReduceMaxMatches();
	if ( m_pProfile )
		m_pProfile->m_iMaxMatches = iMaxMatches;

	ISphMatchSorter * pResult = CreatePlainSorter ( m_eMatchFunc, m_tQuery.m_bSortKbuffer, iMaxMatches, bNeedFactors );
	if ( !pResult )
		return nullptr;

	return CreateColumnarProxySorter ( pResult, iMaxMatches, *m_pSorterSchema, m_tStateMatch, m_eMatchFunc, bNeedFactors, m_tSettings.m_bComputeItems, m_bMulti );
}
// Run all schema-augmentation stages (joins, geodist, knn, expressions, null
// bitmask); stop at the first failing stage.
bool QueueCreator_c::SetupComputeQueue ()
{
	if ( !AddJoinAttrs() )
		return false;
	if ( !AddJoinFilterAttrs() )
		return false;
	if ( !MaybeAddGeodistColumn() )
		return false;
	if ( !AddKNNDistColumn() )
		return false;
	if ( !MaybeAddExprColumn() )
		return false;
	if ( !MaybeAddExpressionsFromSelectList() )
		return false;
	if ( !AddExpressionsForUpdates() )
		return false;
	return AddNullBitmask();
}
// Group-related setup: grouping columns first, then the match-level sort, then
// the group-level sort; stop at the first failure.
bool QueueCreator_c::SetupGroupQueue ()
{
	if ( !AddGroupbyStuff() )
		return false;
	if ( !SetupMatchesSortingFunc() )
		return false;
	return SetGroupSorting();
}
// When several stored columnar attributes are only evaluated at the FINAL stage,
// reading each one through the columnar storage is wasteful - fetch them from the
// docstore instead by swapping their expressions.
bool QueueCreator_c::ConvertColumnarToDocstore()
{
	// don't use docstore (need to try to keep schemas similar for multiquery to work)
	if ( m_tQuery.m_bFacet || m_tQuery.m_bFacetHead )
		return true;

	// check for columnar attributes that have FINAL eval stage
	// if we have more than 1 of such attributes (and they are also stored), we replace columnar expressions with docstore fetch expressions
	CSphVector<int> dStoredColumnar;
	auto & tSchema = *m_pSorterSchema;
	for ( int i = 0; i < tSchema.GetAttrsCount(); i++ )
	{
		auto & tAttr = tSchema.GetAttr(i);
		bool bStored = false;
		bool bColumnar = tAttr.m_pExpr && tAttr.m_pExpr->IsColumnar(&bStored);
		if ( bColumnar && bStored && tAttr.m_eStage==SPH_EVAL_FINAL )
			dStoredColumnar.Add(i);
	}

	// a single such attribute is cheap enough to keep reading via columnar storage
	if ( dStoredColumnar.GetLength()<=1 )
		return true;

	for ( auto i : dStoredColumnar )
	{
		auto & tAttr = const_cast<CSphColumnInfo&>( tSchema.GetAttr(i) );
		CSphString sColumnarAttrName;
		tAttr.m_pExpr->Command ( SPH_EXPR_GET_COLUMNAR_COL, &sColumnarAttrName );
		tAttr.m_pExpr = CreateExpr_GetStoredAttr ( sColumnarAttrName, tAttr.m_eAttrType );
	}

	return true;
}
// Full sorter setup pipeline: computed columns, then grouping/sorting, then the
// columnar-to-docstore conversion; stop at the first failure.
bool QueueCreator_c::SetupQueue ()
{
	if ( !SetupComputeQueue() )
		return false;
	if ( !SetupGroupQueue() )
		return false;
	return ConvertColumnarToDocstore();
}
// Final assembly: spawn the sorter and hand over the schema, comparator states and
// randomization setup. Returns nullptr (with m_sError set) on failure.
ISphMatchSorter * QueueCreator_c::CreateQueue ()
{
	SetupCollation();

	// facet head without explicit group-by: demote implicit grouping
	if ( m_bHeadWOGroup && m_tGroupSorterSettings.m_bImplicit )
	{
		m_tGroupSorterSettings.m_bImplicit = false;
		m_bGotGroupby = false;
	}

	///////////////////
	// spawn the queue
	///////////////////

	ISphMatchSorter * pTop = SpawnQueue();
	if ( !pTop )
	{
		Err ( "internal error: unhandled sorting mode (match-sort=%d, group=%d, group-sort=%d)", m_eMatchFunc, m_bGotGroupby, m_eGroupFunc );
		return nullptr;
	}

	assert ( pTop );
	pTop->SetSchema ( m_pSorterSchema.release(), false );	// sorter takes ownership of the schema
	pTop->SetState ( m_tStateMatch );
	pTop->SetGroupState ( m_tStateGroup );
	pTop->SetRandom ( m_bRandomize );

	if ( !m_bHaveStar && m_hQueryColumns.GetLength() )
		pTop->SetFilteredAttrs ( m_hQueryColumns, m_tSettings.m_bNeedDocids || m_bExprsNeedDocids );

	if ( m_bRandomize )
	{
		// a fixed seed gives reproducible ORDER BY RAND() results
		if ( m_tQuery.m_iRandSeed>=0 )
			sphSrand ( (DWORD)m_tQuery.m_iRandSeed );
		else
			sphAutoSrand();
	}

	return pTop;
}
// Drop the "remapped" flag from string-ptr keyparts so the remap can be redone
// against a replacement schema.
static void ResetRemaps ( CSphMatchComparatorState & tState )
{
	for ( int iPart = 0; iPart<CSphMatchComparatorState::MAX_ATTRS; ++iPart )
		if ( tState.m_eKeypart[iPart]==SPH_KEYPART_STRINGPTR && tState.m_dRemapped.BitGet ( iPart ) )
			tState.m_dRemapped.BitClear ( iPart );
}
// Replace the sorter schema with the shared multi-query schema and redo the whole
// group-queue setup (remap flags must be cleared first so remapping reruns).
bool QueueCreator_c::SetSchemaGroupQueue ( const CSphRsetSchema & tNewSchema )
{
	// need to reissue remap but with existed attributes
	ResetRemaps ( m_tStateMatch );
	ResetRemaps ( m_tStateGroup );

	*m_pSorterSchema = tNewSchema;
	return SetupGroupQueue();
}
///////////////////////////////////////////////////////////////////////////////
// Spawn the sorter and publish the creator's result flags into the shared result.
static ISphMatchSorter * CreateQueue ( QueueCreator_c & tCreator, SphQueueRes_t & tRes )
{
	auto * pQueue = tCreator.CreateQueue ();
	tRes.m_uPackedFactorFlags = tCreator.m_uPackedFactorFlags;
	tRes.m_bZonespanlist = tCreator.m_bZonespanlist;
	tRes.m_bJoinedGroupSort = tCreator.m_bJoinedGroupSort;
	return pQueue;
}
// Spawn a sorter from every creator marked m_bCreate. When multi-query is still
// allowed, debug-check that all spawned sorters got identical attribute counts.
static void CreateSorters ( const VecTraits_T<CSphQuery> & dQueries, const VecTraits_T<ISphMatchSorter*> & dSorters, const VecTraits_T<QueueCreator_c> & dCreators, const VecTraits_T<CSphString> & dErrors, SphQueueRes_t & tRes )
{
	ARRAY_FOREACH ( i, dCreators )
	{
		if ( !dCreators[i].m_bCreate )
			continue;

		dSorters[i] = CreateQueue ( dCreators[i], tRes );
		assert ( dSorters[i]!=nullptr );
	}

	if ( tRes.m_bAlowMulti )
	{
		// multi-query requires matching schemas across the sorters (debug-only check)
		ISphMatchSorter * pSorter0 = nullptr;
		for ( int iCheck=0; iCheck<dSorters.GetLength(); ++iCheck )
		{
			if ( !dCreators[iCheck].m_bCreate )
				continue;

			assert ( dSorters[iCheck] );
			if ( !pSorter0 )
			{
				pSorter0 = dSorters[iCheck];
				continue;
			}

			assert ( dSorters[iCheck]->GetSchema()->GetAttrsCount()==pSorter0->GetSchema()->GetAttrsCount() );
		}
	}
}
// Multi-query setup: build one QueueCreator_c per query, then try to merge their
// schemas into a single shared schema so all sorters can be fed by one pass.
// On any irreconcilable difference, clears tRes.m_bAlowMulti so the caller falls
// back to independent sorters.
static void CreateMultiQueue ( RawVector_T<QueueCreator_c> & dCreators, const SphQueueSettings_t & tQueue, const VecTraits_T<CSphQuery> & dQueries, VecTraits_T<ISphMatchSorter*> & dSorters, VecTraits_T<CSphString> & dErrors, SphQueueRes_t & tRes, StrVec_t * pExtra, QueryProfile_c * pProfile )
{
	assert ( dSorters.GetLength()>1 );
	assert ( dSorters.GetLength()==dQueries.GetLength() );
	assert ( dSorters.GetLength()==dErrors.GetLength() );

	dCreators.Reserve_static ( dSorters.GetLength () );
	dCreators.Emplace_back( tQueue, dQueries[0], dErrors[0], pExtra, pProfile );
	dCreators[0].m_bMulti = true;

	// same as SetupQueue
	bool bSuccess = dCreators[0].SetupComputeQueue ();
	// copy schema WO group by and internals
	// (snapshot of the first query's schema before group-by columns are added -
	// used later as the seed for the merged schema)
	CSphRsetSchema tRefSchema = dCreators[0].SorterSchema();
	bool bHasJson = dCreators[0].HasJson();
	bool bJsonMixed = false;

	if ( bSuccess )
		bSuccess &= dCreators[0].SetupGroupQueue ();

	dCreators[0].m_bCreate = bSuccess;

	// create rest of schemas
	for ( int i=1; i<dSorters.GetLength(); ++i )
	{
		// fill extra only for initial pass
		dCreators.Emplace_back ( tQueue, dQueries[i], dErrors[i], pExtra, pProfile );
		dCreators[i].m_bMulti = true;
		if ( !dCreators[i].SetupQueue () )
		{
			dCreators[i].m_bCreate = false;
			continue;
		}

		bJsonMixed |= ( bHasJson!=dCreators[i].HasJson () );
		bHasJson |= dCreators[i].HasJson();
	}

	// FIXME!!! check attributes and expressions matches
	// quick structural equality check: same dynamic size and attribute count
	bool bSame = !bJsonMixed;
	const auto& tSchema0 = dCreators[0].SorterSchema();
	for ( int i=1; i<dCreators.GetLength() && bSame; ++i )
	{
		const auto & tCur = dCreators[i].SorterSchema();
		bSame &= ( tSchema0.GetDynamicSize()==tCur.GetDynamicSize() && tSchema0.GetAttrsCount()==tCur.GetAttrsCount() );
	}

	// same schemes
	if ( bSame )
		return;

	// schemas differ: merge all dynamic/columnar attributes into one shared schema
	CSphRsetSchema tMultiSchema = tRefSchema;

	int iMinGroups = INT_MAX;
	int iMaxGroups = 0;
	bool bHasMulti = false;
	ARRAY_FOREACH ( iSchema, dCreators )
	{
		if ( !dCreators[iSchema].m_bCreate )
			continue;

		int iGroups = 0;
		const CSphRsetSchema & tSchema = dCreators[iSchema].SorterSchema();
		for ( int iCol=0; iCol<tSchema.GetAttrsCount(); ++iCol )
		{
			const CSphColumnInfo & tCol = tSchema.GetAttr ( iCol );
			if ( !tCol.m_tLocator.m_bDynamic && !tCol.IsColumnar() )
				continue;

			if ( IsGroupbyMagic ( tCol.m_sName ) )
			{
				++iGroups;
				// internal json sort columns do get merged; other magic columns don't
				if ( !IsSortJsonInternal ( tCol.m_sName ))
					continue;
			}

			const CSphColumnInfo * pMultiCol = tMultiSchema.GetAttr ( tCol.m_sName.cstr() );
			if ( pMultiCol )
			{
				bool bDisable1 = false;
				bool bDisable2 = false;
				// no need to add attributes that already exists
				// (same type and same expression hash means the very same column)
				if ( pMultiCol->m_eAttrType==tCol.m_eAttrType &&
					( ( !pMultiCol->m_pExpr && !tCol.m_pExpr ) ||
						( pMultiCol->m_pExpr && tCol.m_pExpr
							&& pMultiCol->m_pExpr->GetHash ( tMultiSchema, SPH_FNV64_SEED, bDisable1 )==tCol.m_pExpr->GetHash ( tSchema, SPH_FNV64_SEED, bDisable2 ) )
					) )
					continue;

				// no need to add a new column, but we need the same schema for the sorters
				if ( tCol.IsColumnar() && pMultiCol->IsColumnarExpr() )
				{
					bHasMulti = true;
					continue;
				}

				if ( !tCol.IsColumnarExpr() || !pMultiCol->IsColumnar() ) // need a new column
				{
					tRes.m_bAlowMulti = false; // if attr or expr differs need to create regular sorters and issue search WO multi-query
					return;
				}
			}

			bHasMulti = true;
			tMultiSchema.AddAttr ( tCol, true );
			if ( tCol.m_pExpr )
				tCol.m_pExpr->FixupLocator ( &tSchema, &tMultiSchema );
		}

		iMinGroups = Min ( iMinGroups, iGroups );
		iMaxGroups = Max ( iMaxGroups, iGroups );
	}

	// usual multi query should all have similar group by
	if ( iMinGroups!=iMaxGroups && !dQueries[0].m_bFacetHead && !dQueries[0].m_bFacet )
	{
		tRes.m_bAlowMulti = false;
		return;
	}

	// only group attributes differs - create regular sorters
	if ( !bHasMulti && !bJsonMixed )
		return;

	// setup common schemas
	for ( QueueCreator_c & tCreator : dCreators )
	{
		if ( !tCreator.m_bCreate )
			continue;

		if ( !tCreator.SetSchemaGroupQueue ( tMultiSchema ) )
			tCreator.m_bCreate = false;
	}
}
///////////////////////////////////////////////////////////////////////////////
// Public single-query entry point: set up a creator for the query and, if setup
// succeeds, spawn the sorter (publishing result flags into tRes).
ISphMatchSorter * sphCreateQueue ( const SphQueueSettings_t & tQueue, const CSphQuery & tQuery, CSphString & sError, SphQueueRes_t & tRes, StrVec_t * pExtra, QueryProfile_c * pProfile )
{
	QueueCreator_c tBuilder ( tQueue, tQuery, sError, pExtra, pProfile );
	return tBuilder.SetupQueue() ? CreateQueue ( tBuilder, tRes ) : nullptr;
}
void sphCreateMultiQueue ( const SphQueueSettings_t & tQueue, const VecTraits_T<CSphQuery> & dQueries, VecTraits_T<ISphMatchSorter *> & dSorters, VecTraits_T<CSphString> & dErrors, SphQueueRes_t & tRes, StrVec_t * pExtra, QueryProfile_c * pProfile )
{
RawVector_T<QueueCreator_c> dCreators;
CreateMultiQueue ( dCreators, tQueue, dQueries, dSorters, dErrors, tRes, pExtra, pProfile );
CreateSorters ( dQueries, dSorters, dCreators, dErrors, tRes );
}
| 85,861
|
C++
|
.cpp
| 2,165
| 36.831871
| 293
| 0.725133
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,922
|
taskping.cpp
|
manticoresoftware_manticoresearch/src/taskping.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "taskping.h"
#include "searchdha.h"
#include "searchdtask.h"
// Ping API proto
// Builds SEARCHD_COMMAND_PING requests and parses replies. The cookie sent is the
// send-time (microsecond timer truncated to int, see SchedulePing); the reply echoes
// it back, which lets ParseReply derive a ping round-trip figure.
class PingBuilder_c final : public RequestBuilder_i, public ReplyParser_i, public ISphRefcountedMT
{
public:
	explicit PingBuilder_c ( int iCookie )
		: m_iSendCookie ( iCookie )
	{}

	void BuildRequest ( const AgentConn_t&, ISphOutputBuffer& tOut ) const final
	{
		// API header
		// (tHdr presumably finalizes the packet header on scope exit - confirm in APIHeader)
		auto tHdr = APIHeader ( tOut, SEARCHD_COMMAND_PING, VER_COMMAND_PING );
		tOut.SendInt ( m_iSendCookie );
	}

	bool ParseReply ( MemInputBuffer_c& tReq, AgentConn_t& tConn ) const final
	{
		// reply echoes our cookie; difference to the current timer is stored as trip time
		m_iReceivedCookie = tReq.GetInt ();
		tConn.m_tDesc.m_pDash->m_uPingTripUS = sphMicroTimer()-m_iReceivedCookie;
		return true;
	}

private:
	~PingBuilder_c () final = default;	// refcounted: destroyed via Release() only

private:
	const int m_iSendCookie;			// cookie we send out
	mutable int m_iReceivedCookie = 0;	// cookie echoed back by the remote
};
// (Re)schedule a ping job for the given host dashboard. The job reschedules itself:
// either directly (host busy / not yet due) or via the ping completion callback.
void SchedulePing ( HostDashboardRefPtr_t pHost )
{
	// one global task id shared by all ping jobs
	static int iPingTask = TaskManager::RegisterGlobal ( "Ping service" );
	assert ( iPingTask>=0 && "failed to create ping service task" );
	TaskManager::ScheduleJob ( iPingTask, pHost->EngageTime (), [pHost]
	{
		// scope guard publishing "PING" into the thread's system info (presumably) - confirm in PublishSystemInfo
		auto pDesc = PublishSystemInfo ( "PING" );

		// host may have unsubscribed from pings while the job was queued
		if ( sphInterrupted() || pHost->m_iNeedPing < 1 )
			return;

		auto iEngage = pHost->EngageTime();
		auto iNow = sphMicroTimer();

		// check host engage time (which is linked to last answer time) and don't ping
		// if time is not exceeded (that is, if host is under usual load, we don't waste network for pings)
		if ( !sph::TimeExceeded ( iEngage, iNow ) )
			return SchedulePing ( pHost );

		// it is time to ping. Make the connection and schedule the command.
		// prepare the agent
		using AgentConnRefPtr_t = CSphRefcountedPtr<AgentConn_t>;
		AgentConnRefPtr_t pConn { new AgentConn_t };
		pConn->m_tDesc.CloneFromHost ( pHost->m_tHost );
		assert ( !pHost->m_tHost.m_pDash );

		// fixme! Review the timeouts (g_iPingIntervalUs for both came from legacy)
		pConn->m_iMyConnectTimeoutMs = int ( g_iPingIntervalUs / 1000 );
		pConn->m_iMyQueryTimeoutMs = g_iPingIntervalUs / 1000;
		pConn->m_tDesc.m_pDash = pHost;

		// Run network task
		// sphWarning ( "Ping %s", pConn->m_tDesc.GetMyUrl ().cstr ());
		// todo! we send current time and receive it back, but don't use it. We can compare upon receiving with current time and calculate round-trip time, that looks like quite useful metric!
		CSphRefcountedPtr<PingBuilder_c> pPinger { new PingBuilder_c ( (int)iNow ) };
		// on completion (success or not) reschedule the next ping
		RunRemoteTask ( pConn, pPinger, pPinger, [pPinger, pHost] ( bool ) { SchedulePing ( pHost ); } );
	});
}
// Global pinger implementation: kicks off the self-rescheduling ping loop for any
// host explicitly marked as needing pings.
class Pinger_c: public IPinger
{
public:
	void Subscribe ( HostDashboardRefPtr_t pHost ) final
	{
		if ( !pHost )
			return;
		if ( pHost->m_iNeedPing >= 1 )
			SchedulePing ( pHost );
	}
};
// Install the process-wide pinger; the function-local static keeps the single
// instance alive for the daemon's lifetime.
void Ping::Start()
{
	static Pinger_c dPinger;
	SetGlobalPinger ( &dPinger );
}
| 3,242
|
C++
|
.cpp
| 85
| 35.917647
| 186
| 0.728285
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,923
|
netpoll.cpp
|
manticoresoftware_manticoresearch/src/netpoll.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "netpoll.h"
#include "std/timers.h"
#include "searchdaemon.h"
#include <memory>
#if HAVE_KQUEUE
#include <sys/event.h>
#endif
// Queue of pending network timeouts; tracks the nearest deadline so the poller
// knows how long it may sleep.
class TimeoutEvents_c
{
	TimeoutQueue_c m_dTimeouts;

public:
	constexpr static int64_t TIME_INFINITE = -1;	// no pending deadlines - poller may block indefinitely
	constexpr static int64_t TIME_IMMEDIATE = 0;	// a deadline is due - poll without blocking

	// insert the event, or reposition it if it is already queued; negative deadline
	// means "no timeout" and is ignored
	void AddOrChangeTimeout ( EnqueuedTimeout_t * pEvent )
	{
		if ( pEvent->m_iTimeoutTimeUS>=0 )
			m_dTimeouts.Change ( pEvent );
	}

	void RemoveTimeout ( EnqueuedTimeout_t * pEvent )
	{
		m_dTimeouts.Remove ( pEvent );
	}

	// Returns microseconds until the earliest deadline if it is further away than
	// iGranularity; otherwise pops due entries and returns TIME_IMMEDIATE; returns
	// TIME_INFINITE when no deadlines are queued at all.
	int64_t GetNextTimeoutUS ( int64_t iGranularity )
	{
		auto iDefaultTimeoutUS = TIME_INFINITE;
		while ( !m_dTimeouts.IsEmpty() )
		{
			auto * pNetEvent = (EnqueuedTimeout_t *) m_dTimeouts.Root ();
			assert ( pNetEvent->m_iTimeoutTimeUS>0 );

			auto iNextTimeoutUS = pNetEvent->m_iTimeoutTimeUS - MonoMicroTimer();
			if ( iNextTimeoutUS > iGranularity )
				return iNextTimeoutUS;
			else
				iDefaultTimeoutUS = TIME_IMMEDIATE;

			// deadline is due (within granularity) - drop it and keep scanning
			m_dTimeouts.Pop ();
		}
		return iDefaultTimeoutUS;
	}
};
#if ( NETPOLL_TYPE==NETPOLL_KQUEUE || NETPOLL_TYPE==NETPOLL_EPOLL )
// both epoll and kqueue have events in hash, so we use also linked list to track them explicitly.
// both stores pointer to the list item in pPtr of backend
using NetPollEventsList_t = boost::intrusive::slist<NetPollEvent_t,
boost::intrusive::member_hook<NetPollEvent_t, netlist_hook_t, &NetPollEvent_t::m_tBackHook>,
boost::intrusive::constant_time_size<true>,
boost::intrusive::cache_last<true>>;
// specific different function for kqueue/epoll
// keep them elementary and tiny for easy debugging across platforms
#if ( NETPOLL_TYPE == NETPOLL_KQUEUE )
// kqueue backend for the network poller: thin static wrappers the shared Impl_c
// calls into. kqueue timeouts are nanosecond-capable, hence microsecond granularity.
struct PollTraits_t
{
	using pollev = struct kevent;
	constexpr static int64_t poll_granularity = 1LL;	// microseconds

	inline static int create_poller ( int ) { return kqueue(); }
	inline static void close_poller ( int iPl )
	{
		if ( iPl >= 0 )
			sphSockClose ( iPl );
	}

	inline static void* get_data ( const pollev& tEv ) { return tEv.udata; }

	// decode kqueue flags into the portable NetPollEvent_t event mask; for oneshot
	// events also drop the armed read/write bits, since kqueue fired them once already
	inline static void translate_events ( const pollev& tEv )
	{
		auto* pNode = (NetPollEvent_t*)get_data ( tEv );
		assert ( pNode && "deleted event recognized" );
		if ( !pNode )
			return;

		pNode->m_uGotEvents = ( ( tEv.flags & EV_ERROR ) ? NetPollEvent_t::IS_ERR : 0 )
				| ( ( tEv.flags & EV_EOF ) ? NetPollEvent_t::IS_HUP : 0 )
				| ( ( tEv.filter == EVFILT_READ ) ? NetPollEvent_t::IS_READ : NetPollEvent_t::IS_WRITE );

		if ( !( pNode->m_uIOActive & NetPollEvent_t::SET_ONESHOT ) )
			return;

		if ( pNode->m_uIOActive & NetPollEvent_t::SET_READ )
			pNode->m_uIOActive &= ~( NetPollEvent_t::SET_READ );
		if ( pNode->m_uIOActive & NetPollEvent_t::SET_WRITE )
			pNode->m_uIOActive &= ~( NetPollEvent_t::SET_WRITE );
	}

	inline static int poll_events ( int iPoll, pollev* pEvents, int iEventNum, int64_t timeoutUS )
	{
		timespec ts;
		return kevent ( iPoll, nullptr, 0, pEvents, iEventNum, US2timespec ( ts, timeoutUS ) );
	};

	// diff the previously armed flags (uIOActive) against the requested ones
	// (uIOChange) and issue the matching EVFILT_READ/EVFILT_WRITE add/delete pairs
	inline static int set_polling_for ( int iPoll, int iSock, void* pData, BYTE uIOActive, BYTE uIOChange, bool )
	{
		bool bWrite = uIOChange & NetPollEvent_t::SET_WRITE;
		bool bRead = uIOChange & NetPollEvent_t::SET_READ;
		bool bWasWrite = uIOActive & NetPollEvent_t::SET_WRITE;
		bool bWasRead = uIOActive & NetPollEvent_t::SET_READ;
		auto iOp = EV_ADD
				| ( ( uIOChange & NetPollEvent_t::SET_ON_EDGE ) ? EV_CLEAR : 0 )
				| ( ( uIOChange & NetPollEvent_t::SET_ONESHOT ) ? EV_ONESHOT : 0 );
		struct kevent tEv[2];
		auto pEv = &tEv[0];

		// boring combination matrix below
		if ( bRead && !bWasRead )
			EV_SET ( pEv++, iSock, EVFILT_READ, iOp, 0, 0, pData );
		if ( bWrite && !bWasWrite )
			EV_SET ( pEv++, iSock, EVFILT_WRITE, iOp, 0, 0, pData );
		if ( !bRead && bWasRead )
			EV_SET ( pEv++, iSock, EVFILT_READ, EV_DELETE, 0, 0, pData );
		if ( !bWrite && bWasWrite )
			EV_SET ( pEv++, iSock, EVFILT_WRITE, EV_DELETE, 0, 0, pData );

		// at most one read-related and one write-related change can be pending
		const int nEvs = pEv - tEv;
		assert ( nEvs <= 2 );
		for ( int i = 0; i < nEvs; ++i )
			sphLogDebugv ( "%p kqueue %d setup, ev=%d, fl=%d sock=%d", pData, iPoll, tEv[i].filter, tEv[i].flags, iSock );

		return kevent ( iPoll, tEv, nEvs, nullptr, 0, nullptr );
	}

	// kqueue fires separate read and write events, so reserve two slots per socket
	inline static int polling_size ( int iQueueSize, int iMaxReady )
	{
		return iMaxReady ? 2 * Min ( iMaxReady, iQueueSize ) : 2 * iQueueSize;
	}

private:
	// convert a microsecond timeout into a timespec; negative means "wait forever" (null)
	inline static timespec* US2timespec ( timespec& ts, int64_t timeoutUS )
	{
		timespec* pts = nullptr;
		if ( timeoutUS >= 0 )
		{
			ts.tv_sec = timeoutUS / 1000000;
			ts.tv_nsec = (long)( timeoutUS - ts.tv_sec * 1000000 ) * 1000;
			pts = &ts;
		}
		return pts;
	}
};
#elif ( NETPOLL_TYPE == NETPOLL_EPOLL )
// epoll backend for the network poller: thin static wrappers the shared Impl_c
// calls into. epoll_wait timeouts are millisecond-based, hence 1000us granularity.
struct PollTraits_t
{
	using pollev = epoll_event;
	constexpr static int64_t poll_granularity = 1000LL;	// microseconds per epoll tick (ms)

	inline static int create_poller ( int iSizeHint ) { return epoll_create ( iSizeHint ); }
	inline static void close_poller ( int iPl )
	{
		if ( iPl >= 0 )
			sphSockClose ( iPl );
	}

	inline static void* get_data ( const pollev& tEv ) { return tEv.data.ptr; }

	// decode epoll flags into the portable NetPollEvent_t event mask
	inline static void translate_events ( const pollev& tEv )
	{
		auto* pNode = (NetPollEvent_t*)get_data ( tEv );
		assert ( pNode && "deleted event recognized" );
		if ( !pNode )
			return;

		pNode->m_uGotEvents = ( ( tEv.events & EPOLLERR ) ? NetPollEvent_t::IS_ERR : 0 )
				| ( ( tEv.events & EPOLLHUP ) ? NetPollEvent_t::IS_HUP : 0 )
				| ( ( tEv.events & EPOLLIN ) ? NetPollEvent_t::IS_READ : 0 )
				| ( ( tEv.events & EPOLLOUT ) ? NetPollEvent_t::IS_WRITE : 0 );
	}

	// convert a microsecond timeout to the millisecond value epoll_wait expects
	inline static int US2Polltime ( int64_t timeoutUS )
	{
		switch ( timeoutUS )
		{
		case TimeoutEvents_c::TIME_INFINITE: return -1;
		case TimeoutEvents_c::TIME_IMMEDIATE: return 0;
		default: return timeoutUS / poll_granularity;
		}
	}

	inline static int poll_events ( int iPoll, pollev* pEvents, int iEventNum, int64_t timeoutUS )
	{
		return epoll_wait ( iPoll, pEvents, iEventNum, US2Polltime ( timeoutUS ) );
	};

	// human-readable epoll op for debug logging
	inline static const char* epoll_action_name ( int iOp )
	{
		switch ( iOp )
		{
		case EPOLL_CTL_ADD: return "EPOLL_CTL_ADD";
		case EPOLL_CTL_MOD: return "EPOLL_CTL_MOD";
		case EPOLL_CTL_DEL: return "EPOLL_CTL_DEL";
		default: return "UNKNOWN";	// was "UNKNWON" - typo in debug-log output fixed
		}
	}

	// issue the epoll_ctl ADD/MOD/DEL matching the requested flags for the socket
	inline static int set_polling_for ( int iPoll, int iSock, void* pData, BYTE, BYTE uIOChange, bool bAdd )
	{
		bool bRW = uIOChange & NetPollEvent_t::SET_RW;
		if ( !bRW && !bAdd )
			return 0;

		int iOp = bRW ? ( bAdd ? EPOLL_CTL_ADD : EPOLL_CTL_MOD ) : EPOLL_CTL_DEL;
		epoll_event tEv;
		if ( bRW )
		{
			tEv.data.ptr = pData;
			tEv.events = ( ( uIOChange & NetPollEvent_t::SET_ON_EDGE ) ? EPOLLET : 0 )
					| ( ( uIOChange & NetPollEvent_t::SET_ONESHOT ) ? EPOLLONESHOT : 0 )
					| ( ( uIOChange & NetPollEvent_t::SET_READ ) ? EPOLLIN : 0 )
					| ( ( uIOChange & NetPollEvent_t::SET_WRITE ) ? EPOLLOUT : 0 );
			sphLogDebugv ( "%p epoll %d setup, ev=0x%u, op=%s, sock=%d", pData, iPoll, tEv.events, epoll_action_name ( iOp ), iSock );
		} else
			sphLogDebugv ( "%p epoll %d setup, op=%s, sock=%d", pData, iPoll, epoll_action_name ( iOp ), iSock );

		// note: the event struct is ignored by the kernel for EPOLL_CTL_DEL
		return epoll_ctl ( iPoll, iOp, iSock, &tEv );
	}

	inline static int polling_size ( int iQueueSize, int iMaxReady )
	{
		return iMaxReady ? Min ( iMaxReady, iQueueSize ) : iQueueSize;
	}
};
#endif
// need for remove from intrusive list to work: two events are "equal" only when
// they are the very same object (identity, not value, comparison)
inline bool operator== ( const NetPollEvent_t& lhs, const NetPollEvent_t& rhs )
{
	return std::addressof ( lhs )==std::addressof ( rhs );
}
// common for both epoll and kqueue
class NetPooller_c::Impl_c final : public PollTraits_t
{
friend class NetPooller_c;
friend class NetPollReadyIterator_c;
TimeoutEvents_c m_dTimeouts GUARDED_BY ( NetPoollingThread );
CSphVector<pollev> m_dFiredEvents GUARDED_BY ( NetPoollingThread );
NetPollEventsList_t m_tEvents GUARDED_BY ( NetPoollingThread ); // used for 'for_all'
int m_iReady = 0;
const int m_iMaxReady;
int m_iLastReportedErrno = -1;
int m_iPl;
public:
explicit Impl_c ( int iSizeHint, int iMaxReady )
: m_iMaxReady (iMaxReady)
{
m_iPl = create_poller ( iSizeHint );
if ( m_iPl==-1 )
sphDie ( "failed to create poller main FD, errno=%d, %s", errno, strerrorm ( errno ) );
sphLogDebugv ( "poller %d created", m_iPl );
m_dFiredEvents.Reserve ( iSizeHint );
}
~Impl_c ()
{
sphLogDebugv ( "poller %d closed", m_iPl );
close_poller ( m_iPl );
}
// called from working netloop routine
void SetupEvent ( NetPollEvent_t * pEvent ) REQUIRES ( NetPoollingThread )
{
if ( pEvent->m_uIOChange == NetPollEvent_t::SET_CLOSED || pEvent->m_uIOChange == NetPollEvent_t::SET_NONE )
return RemoveEvent ( pEvent );
assert ( pEvent && pEvent->m_iSock>=0 );
assert ( pEvent->m_uIOChange & NetPollEvent_t::SET_RW );
m_dTimeouts.AddOrChangeTimeout ( pEvent );
bool bIsNew = !pEvent->m_tBackHook.is_linked();
if ( bIsNew )
{
SafeAddRef ( pEvent );
m_tEvents.push_back ( *pEvent );
}
int iRes = set_polling_for ( m_iPl, pEvent->m_iSock, pEvent, pEvent->m_uIOActive, pEvent->m_uIOChange, bIsNew );
pEvent->m_uIOActive = pEvent->m_uIOChange;
if ( iRes == -1 )
sphWarning ( "failed to setup queue event for sock %d, errno=%d, %s", pEvent->m_iSock, errno, strerrorm ( errno ) );
}
	// called when client detected error or timeout, and even when netloop routine is not active (i.e., where it is stopped)
	// Unregister pEvent: drop its timeout, detach it from the kernel poller
	// (unless it was already reported closed), then unlink from m_tEvents and
	// release the reference taken in SetupEvent().
	void RemoveEvent ( NetPollEvent_t* pEvent ) REQUIRES ( NetPoollingThread )
	{
		assert ( pEvent );
		RemoveTimeout ( pEvent );
		sphLogDebugv ( "%p polling remove, ev=%u, sock=%d", pEvent, pEvent->m_uIOChange, pEvent->m_iSock );
		if ( pEvent->m_uIOChange != NetPollEvent_t::SET_CLOSED )
		{
			pEvent->m_uIOChange = NetPollEvent_t::SET_NONE;
			int iRes = set_polling_for ( m_iPl, pEvent->m_iSock, pEvent, pEvent->m_uIOActive, 0, true );
			// might be already closed by worker from thread pool
			if ( iRes == -1 )
				sphLogDebugv ( "failed to remove polling event for sock %d(%p), errno=%d, %s", pEvent->m_iSock, pEvent, errno, strerrorm ( errno ) );
		}
		// since event already removed from kqueue - it is safe to remove it from the list of events also,
		// and totally unlink
		if ( pEvent->IsLinked() )
		{
			m_tEvents.remove ( *pEvent );
			SafeRelease ( pEvent );
		}
	}
	// One poll tick: block for up to iUS microseconds (or until the nearest
	// registered timeout when iUS==WAIT_UNTIL_TIMEOUT) and collect fired events
	// into m_dFiredEvents. The number of ready events lands in m_iReady.
	// EINTR/EAGAIN/EWOULDBLOCK are treated as recoverable; other errors are
	// warned about once per distinct errno to avoid log flooding.
	void Wait ( int64_t iUS ) REQUIRES ( NetPoollingThread )
	{
		if ( m_tEvents.empty() )
			return;
		if ( iUS==WAIT_UNTIL_TIMEOUT )
			iUS = m_dTimeouts.GetNextTimeoutUS ( poll_granularity );
		m_dFiredEvents.Resize ( polling_size ( m_tEvents.size(), m_iMaxReady ) );
		// need positive timeout for communicate threads back and shutdown
		m_iReady = poll_events ( m_iPl, m_dFiredEvents.Begin (), m_dFiredEvents.GetLength (), iUS );
		if ( m_iReady>=0 )
			return;
		int iErrno = sphSockGetErrno ();
		// common recoverable errors
		if ( iErrno==EINTR || iErrno==EAGAIN || iErrno==EWOULDBLOCK )
			return;
		if ( m_iLastReportedErrno!=iErrno )
		{
			sphWarning ( "polling tick failed: %s", sphSockError ( iErrno ) );
			m_iLastReportedErrno = iErrno;
		}
	}
	// Invoke fnAction on every registered event (fired or not), e.g. for
	// shutdown or mass-timeout sweeps. fnAction may call RemoveEvent().
	void ProcessAll ( std::function<void ( NetPollEvent_t * )>&& fnAction ) REQUIRES ( NetPoollingThread )
	{
		// not ranged-for here, as postfix ++ action for iterator is important (as fnAction can remove elem from list)
		for ( auto it { m_tEvents.begin() }, itend { m_tEvents.end() }; it != itend; )
			fnAction ( &*it++ );
	}
	// Number of events reported ready by the last Wait() tick.
	int GetNumOfReady () const
	{
		return m_iReady;
	}
	// Detach pEvent from timeout tracking only; kernel registration stays intact.
	void RemoveTimeout ( NetPollEvent_t * pEvent ) REQUIRES ( NetPoollingThread )
	{
		assert ( pEvent );
		m_dTimeouts.RemoveTimeout ( pEvent );
	}
};
// more common for NETPOLL_TYPE==NETPOLL_KQUEUE || NETPOLL_TYPE==NETPOLL_EPOLL
// Dereference: translate the m_iIterEv-th fired kernel event into our IS_*
// flags and return the NetPollEvent_t the kernel handed back as user data.
NetPollEvent_t & NetPollReadyIterator_c::operator* ()
{
	auto & pOwner = m_pOwner->m_pImpl;
	const auto & tEv = pOwner->m_dFiredEvents[m_iIterEv];
	PollTraits_t::translate_events ( tEv );
	return *(NetPollEvent_t*)PollTraits_t::get_data ( tEv );
};
// Advance to the next fired event slot.
NetPollReadyIterator_c & NetPollReadyIterator_c::operator++ ()
{
	++m_iIterEv;
	return *this;
}
// Inequality against the end sentinel: iteration stops once m_iIterEv reaches
// the ready count. NOTE(review): rhs is presumably always the end iterator
// (null m_pOwner); a non-null rhs owner short-circuits to 'not equal' — confirm.
bool NetPollReadyIterator_c::operator!= ( const NetPollReadyIterator_c & rhs ) const
{
	auto & pOwner = m_pOwner->m_pImpl;
	return rhs.m_pOwner || m_iIterEv<pOwner->m_iReady;
}
#endif // #if ( NETPOLL_TYPE==NETPOLL_KQUEUE || NETPOLL_TYPE==NETPOLL_EPOLL )
#if ( NETPOLL_TYPE == NETPOLL_POLL )
// Backend traits for the portable poll(2) network processor: converts between
// our NetPollEvent_t flag set and the pollfd events/revents bitmasks.
struct PollTraits_t
{
	// poll(2) accepts milliseconds, so one tick is 1000 microseconds
	constexpr static int64_t poll_granularity = 1000LL;

	// fill a pollfd slot from our SET_* request flags
	inline static void set_polling_for ( pollfd& tEv, int iSock, BYTE uIOChange )
	{
		tEv.fd = iSock;
		int iMask = 0;
		if ( uIOChange & NetPollEvent_t::SET_READ )
			iMask |= POLLIN;
		if ( uIOChange & NetPollEvent_t::SET_WRITE )
			iMask |= POLLOUT;
		tEv.events = iMask;
	}

	// thin wrapper over poll(2); timeout comes in microseconds
	inline static int poll_events ( pollfd* pBegin, int iLength, int64_t timeoutUS )
	{
		return ::poll ( pBegin, iLength, US2Polltime ( timeoutUS ) );
	};

	// translate kernel revents back into our IS_* flags on the event node
	inline static void translate_events ( const pollfd& tEv, NetPollEvent_t* pNode )
	{
		auto uGot = decltype ( pNode->m_uGotEvents ) {0};
		if ( tEv.revents & POLLERR )
			uGot |= NetPollEvent_t::IS_ERR;
		if ( tEv.revents & POLLHUP )
			uGot |= NetPollEvent_t::IS_HUP;
		if ( tEv.revents & POLLIN )
			uGot |= NetPollEvent_t::IS_READ;
		if ( tEv.revents & POLLOUT )
			uGot |= NetPollEvent_t::IS_WRITE;
		pNode->m_uGotEvents = uGot;
	}

private:
	// microseconds -> poll(2) milliseconds, honouring the special sentinels
	inline static int US2Polltime ( int64_t timeoutUS )
	{
		if ( timeoutUS==TimeoutEvents_c::TIME_INFINITE )
			return -1;
		if ( timeoutUS==TimeoutEvents_c::TIME_IMMEDIATE )
			return 0;
		return timeoutUS / poll_granularity;
	}
};
// poll(2) flavour of the netpoll backend. Keeps two parallel arrays:
// m_dWork (event nodes) and m_dEvents (pollfd slots); an event remembers its
// slot via m_iBackIdx. Freed slots are tombstoned (fd=-1 / null work entry)
// and compacted lazily in ProcessAll().
class NetPooller_c::Impl_c final : public PollTraits_t
{
	friend class NetPooller_c;
	friend class NetPollReadyIterator_c;

	TimeoutEvents_c m_dTimeouts GUARDED_BY ( NetPoollingThread );
	CSphVector<NetPollEvent_t *> m_dWork GUARDED_BY ( NetPoollingThread );	// nodes; nullptr = tombstone
	CSphVector<pollfd> m_dEvents GUARDED_BY ( NetPoollingThread );			// parallel pollfd slots
	int m_iReady = 0;				// events reported by last Wait()
	int m_iLastReportedErrno = -1;	// dedup for poll-failure warnings

public:
	explicit Impl_c ( int iSizeHint, int iMaxReady )
	{
		m_dWork.Reserve ( iSizeHint );
		m_dEvents.Reserve ( iSizeHint );
		// poll(2) has no per-tick ready cap, unlike epoll/kqueue
		if ( iMaxReady!=0 )
			sphWarning ( "Setting 'net_throttle_action' is not supported with 'poll' network processor. Discarded.");
	}

	// called from working netloop routine
	// Register polling for pEvent, reusing its old (tombstoned) slot when
	// possible; SET_CLOSED/SET_NONE requests go to RemoveEvent() instead.
	void SetupEvent ( NetPollEvent_t * pEvent ) REQUIRES ( NetPoollingThread )
	{
		if ( pEvent->m_uIOChange == NetPollEvent_t::SET_CLOSED || pEvent->m_uIOChange == NetPollEvent_t::SET_NONE )
			return RemoveEvent ( pEvent );
		assert ( m_dEvents.GetLength ()==m_dWork.GetLength () );
		assert ( pEvent && pEvent->m_iSock>=0 );
		assert ( pEvent->m_uIOChange & NetPollEvent_t::SET_RW );
		m_dTimeouts.AddOrChangeTimeout ( pEvent );
		pollfd* pEv = nullptr;
		auto& iEventBackIdx = pEvent->m_iBackIdx;
		if ( iEventBackIdx>=0 && iEventBackIdx<m_dWork.GetLength () && !m_dWork[iEventBackIdx] ) // was already enqueued, just change events
		{
			m_dWork[iEventBackIdx] = pEvent;
			pEv = &m_dEvents[iEventBackIdx];
			sphLogDebugvv ( "SetupEvent [%d] old %d", pEvent->m_iBackIdx, pEvent->m_iSock );
		} else
		{
			// fresh registration: take a reference and append a new slot pair
			SafeAddRef ( pEvent );
			iEventBackIdx = m_dWork.GetLength ();
			m_dWork.Add ( pEvent );
			pEv = &m_dEvents.Add ();
			sphLogDebugvv ( "SetupEvent [%d] new %d", pEvent->m_iBackIdx, pEvent->m_iSock );
		}
		set_polling_for ( *pEv, pEvent->m_iSock, pEvent->m_uIOChange );
		pEvent->m_uIOActive = pEvent->m_uIOChange;
		sphLogDebugv ( "SetupEvent [%d] for %d events %d", pEvent->m_iBackIdx, pEvent->m_iSock, pEv->events );
	}

	// called when client detected error or timeout, and even when netloop routine is not active (i.e., where it is stopped)
	// Tombstone the event's slot (fd=-1 is ignored by poll(2)) and release it;
	// the actual array compaction happens later in ProcessAll().
	void RemoveEvent ( NetPollEvent_t* pEvent ) REQUIRES ( NetPoollingThread )
	{
		assert ( pEvent );
		RemoveTimeout ( pEvent );
		sphLogDebugvv ( "RemoveEvent for %d, fd=%d", pEvent->m_iBackIdx, pEvent->m_iSock );
		if ( !pEvent->IsLinked() ) // already removed by iteration
			return;
		assert ( pEvent->m_iBackIdx < m_dEvents.GetLength() );
		m_dEvents[pEvent->m_iBackIdx].fd = -1;
		m_dWork[pEvent->m_iBackIdx] = nullptr;
		pEvent->m_iBackIdx = -1;
		SafeRelease ( pEvent );
	}

	// One poll tick: reset revents, block up to iUS microseconds (or until the
	// nearest timeout when iUS==WAIT_UNTIL_TIMEOUT), store ready count in m_iReady.
	void Wait ( int64_t iUS ) REQUIRES ( NetPoollingThread )
	{
		m_iReady = 0;
		if ( m_dEvents.IsEmpty() )
			return;
		// need positive timeout for communicate threads back and shutdown
		if ( iUS == WAIT_UNTIL_TIMEOUT )
			iUS = m_dTimeouts.GetNextTimeoutUS ( poll_granularity );
		m_dEvents.for_each ( [] ( pollfd& dEv ) { dEv.revents = 0; } );
		m_iReady = poll_events ( m_dEvents.Begin (), m_dEvents.GetLength (), iUS );
		if ( m_iReady>=0 )
		{
			sphLogDebugvv ( "Wait returned %d events", m_iReady );
			return;
		}
		int iErrno = sphSockGetErrno ();
		sphLogDebugvv ( "Wait returned %d events with %d after-error", m_iReady, iErrno );
		// common recoverable errors
		if ( iErrno==EINTR || iErrno==EAGAIN || iErrno==EWOULDBLOCK )
			return;
		// warn once per distinct errno to avoid log flooding
		if ( m_iLastReportedErrno!=iErrno )
		{
			sphWarning ( "polling tick failed: %s", sphSockError ( iErrno ) );
			m_iLastReportedErrno = iErrno;
		}
	}

	// Invoke fnAction on every live event; compact tombstoned slots in passing.
	// RemoveFast() moves the tail element into the hole, so the live node's
	// m_iBackIdx is re-synced on each visit.
	void ProcessAll ( std::function<void ( NetPollEvent_t * )>&& fnAction ) REQUIRES ( NetPoollingThread )
	{
		ARRAY_FOREACH ( i, m_dWork )
		{
			auto* pNode = m_dWork[i];
			if ( pNode && pNode->m_iBackIdx >= 0 )
			{
				pNode->m_iBackIdx = i; // adjust index, it might be broken because of RemoveFast
				fnAction ( pNode );
			}
			else
			{
				m_dEvents.RemoveFast ( i );
				m_dWork.RemoveFast ( i );
				--i; // re-examine the element just moved into slot i
			}
		}
		assert ( m_dEvents.GetLength ()==m_dWork.GetLength () );
	}

	// Number of events reported ready by the last Wait() tick.
	int GetNumOfReady () const
	{
		return m_iReady;
	}

	// Detach pEvent from timeout tracking only; its poll slot stays intact.
	void RemoveTimeout ( NetPollEvent_t * pEvent ) REQUIRES ( NetPoollingThread )
	{
		assert ( pEvent );
		m_dTimeouts.RemoveTimeout ( pEvent );
	}
};
// on windows pollfd.fd is unsigned for some unknown reason, hence the warning
#ifdef _WIN32
#pragma warning(push)
#pragma warning(disable:4146)
#endif
// trick: here we unlink ready oneshoted from the list
// Dereference for the poll backend: translate revents into IS_* flags; for
// oneshot events also consume the registration in place — negate the fd so
// poll(2) ignores the slot, tombstone the work entry and drop the reference.
NetPollEvent_t & NetPollReadyIterator_c::operator* ()
{
	auto & pOwner = m_pOwner->m_pImpl;
	auto & tEv = pOwner->m_dEvents[m_iIterEv];
	NetPollEvent_t * pNode = pOwner->m_dWork[m_iIterEv];
	sphLogDebugvv ( "[%d] tEv.revents = %d for %d(%d)", m_iIterEv, tEv.revents, pNode->m_iSock, tEv.fd );
	PollTraits_t::translate_events ( tEv, pNode );
	if ( pNode->m_uIOActive & NetPollEvent_t::SET_ONESHOT )
	{
		tEv.fd = -tEv.fd;	// negative fd is skipped by poll(2); slot compacted later in ProcessAll()
		pOwner->m_dWork[m_iIterEv] = nullptr;
		pNode->m_iBackIdx = -1;
		pNode->m_uIOActive &= ~( NetPollEvent_t::SET_ONESHOT );
		pNode->Release();
	}
	return *pNode;
}
#ifdef _WIN32
#pragma warning(pop)
#endif
// Advance to the next slot that actually fired: skip tombstones (fd<0),
// quiet slots (revents==0) and invalid descriptors (POLLNVAL).
NetPollReadyIterator_c & NetPollReadyIterator_c::operator++ ()
{
	auto & pOwner = m_pOwner->m_pImpl;
	while (true)
	{
		++m_iIterEv;
		if ( m_iIterEv>=pOwner->m_dEvents.GetLength() )
			break;
		pollfd& tEv = pOwner->m_dEvents[m_iIterEv];
		if ( tEv.fd>=0 && tEv.revents!=0 && tEv.revents!=POLLNVAL )
		{
			sphLogDebugvv ( "operator++ on m_iIterEv as matched %d and %d", tEv.fd, tEv.revents );
			break;
		}
	}
	return *this;
}
// Inequality against the end sentinel: iteration stops at the end of the
// slot array. NOTE(review): rhs is presumably always the end iterator (null
// m_pOwner); a non-null rhs owner short-circuits to 'not equal' — confirm.
bool NetPollReadyIterator_c::operator!= ( const NetPollReadyIterator_c & rhs ) const
{
	auto & pOwner = m_pOwner->m_pImpl;
	return rhs.m_pOwner || m_iIterEv<pOwner->m_dEvents.GetLength();
}
#endif
// NetPooller_c is a pimpl façade over the backend-specific Impl_c;
// construction just forwards the hints to the selected implementation.
NetPooller_c::NetPooller_c ( int iSizeHint, int iMaxReady )
	: m_pImpl ( std::make_unique<Impl_c> ( iSizeHint, iMaxReady ) )
{}

NetPooller_c::~NetPooller_c () = default;
// Thin pimpl forwarders: all real work happens in the backend Impl_c.
void NetPooller_c::SetupEvent ( NetPollEvent_t * pEvent )
{
	m_pImpl->SetupEvent ( pEvent );
}

void NetPooller_c::Wait ( int64_t iUS )
{
	m_pImpl->Wait ( iUS );
}

int NetPooller_c::GetNumOfReady () const
{
	return m_pImpl->GetNumOfReady();
}

void NetPooller_c::ProcessAll ( std::function<void ( NetPollEvent_t * )>&& fnAction )
{
	m_pImpl->ProcessAll ( std::move ( fnAction ) );
}

void NetPooller_c::RemoveTimeout ( NetPollEvent_t * pEvent )
{
	m_pImpl->RemoveTimeout ( pEvent );
}

void NetPooller_c::RemoveEvent ( NetPollEvent_t * pEvent )
{
	m_pImpl->RemoveEvent ( pEvent );
}

// Smallest timeout step of the active backend (1ms for poll, backend-defined otherwise).
int64_t NetPooller_c::TickGranularity() const
{
	return m_pImpl->poll_granularity;
}

// Clang thread-safety annotation role guarding net-polling-thread-only state.
ThreadRole NetPoollingThread;
| 20,255
|
C++
|
.cpp
| 557
| 33.549372
| 173
| 0.686839
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,924
|
sphinxsort.cpp
|
manticoresoftware_manticoresearch/src/sphinxsort.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "sphinxsort.h"
#include "sortcomp.h"
#include "aggregate.h"
#include "distinct.h"
#include "netreceive_ql.h"
#include "queuecreator.h"
#include "sortertraits.h"
#include "sortergroup.h"
#include "grouper.h"
#include "knnmisc.h"
#include "joinsorter.h"
#include "querycontext.h"
#include <ctime>
#if !_WIN32
#include <unistd.h>
#include <sys/time.h>
#endif
// Daemon-wide defaults for grouping behaviour, set from config at startup.
static bool g_bAccurateAggregation = false;	// exact (slower) aggregation vs approximate
static int g_iDistinctThresh = 3500;		// threshold for switching COUNT(DISTINCT) strategy

// Setter/getter pairs for the defaults above.
void SetAccurateAggregationDefault ( bool bEnabled )
{
	g_bAccurateAggregation = bEnabled;
}

bool GetAccurateAggregationDefault()
{
	return g_bAccurateAggregation;
}

void SetDistinctThreshDefault ( int iThresh )
{
	g_iDistinctThresh = iThresh;
}

int GetDistinctThreshDefault()
{
	return g_iDistinctThresh;
}
//////////////////////////////////////////////////////////////////////////
// SORTING QUEUES
//////////////////////////////////////////////////////////////////////////
// Comparator over match *indexes* into a queue's storage that inverts COMP,
// so the heap keeps the worst match at the root (cheap reject of candidates).
template < typename COMP >
struct InvCompareIndex_fn
{
	const VecTraits_T<CSphMatch>& m_dBase;			// the queue's match storage
	const CSphMatchComparatorState & m_tState;		// sort state (attrs, order)

	explicit InvCompareIndex_fn ( const CSphMatchQueueTraits & tBase )
		: m_dBase ( tBase.GetMatches() )
		, m_tState ( tBase.GetState() )
	{}

	bool IsLess ( int a, int b ) const // inverts COMP::IsLess
	{
		return COMP::IsLess ( m_dBase[b], m_dBase[a], m_tState );
	}
};
#define LOG_COMPONENT_KMQ __LINE__ << " *(" << this << ") "
#define LOG_LEVEL_DIAG false
#define KMQ LOC(DIAG,KMQ)
/// heap sorter
/// plain binary heap based PQ
/// Keeps the best m_iSize matches; the heap is inverted (worst match at the
/// root, via InvCompareIndex_fn) so a candidate is compared against the
/// current worst in O(1) and either rejected or pushed in O(log n).
/// NOTIFICATIONS enables just-pushed/just-popped row tracking for RT updates.
template < typename COMP, bool NOTIFICATIONS >
class CSphMatchQueue final : public CSphMatchQueueTraits
{
	using MYTYPE = CSphMatchQueue<COMP, NOTIFICATIONS>;
	LOC_ADD;

public:
	/// ctor
	explicit CSphMatchQueue ( int iSize )
		: CSphMatchQueueTraits ( iSize )
		, m_fnComp ( *this )
	{
		if constexpr ( NOTIFICATIONS )
			m_dJustPopped.Reserve(1);
	}

	bool IsGroupby () const final { return false; }
	// root of the inverted heap is the worst retained match
	const CSphMatch * GetWorst() const final { return m_dIData.IsEmpty() ? nullptr : Root(); }

	bool Push ( const CSphMatch & tEntry ) final { return PushT ( tEntry, [this] ( CSphMatch & tTrg, const CSphMatch & tMatch ) { m_pSchema->CloneMatch ( tTrg, tMatch ); }); }

	// batch push; INVALID_ROWID entries only bump the total counter
	void Push ( const VecTraits_T<const CSphMatch> & dMatches ) final
	{
		for ( auto & i : dMatches )
			if ( i.m_tRowID!=INVALID_ROWID )
				PushT ( i, [this] ( CSphMatch & tTrg, const CSphMatch & tMatch ) { m_pSchema->CloneMatch ( tTrg, tMatch ); } );
			else
				m_iTotal++;
	}

	bool PushGrouped ( const CSphMatch &, bool ) final { assert(0); return false; }

	/// store all entries into specified location in sorted order, and remove them from queue
	int Flatten ( CSphMatch * pTo ) final
	{
		KMQ << "flatten";
		assert ( !IsEmpty() );
		int iReadyMatches = Used();
		// pops yield worst-first, so fill the output back-to-front
		pTo += iReadyMatches;
		while ( !IsEmpty() )
		{
			--pTo;
			m_pSchema->FreeDataPtrs(*pTo);
			pTo->ResetDynamic();
			PopAndProcess_T ( [pTo] ( CSphMatch & tRoot )
			{
				Swap ( *pTo, tRoot );
				return true;
			}
			);
		}
		m_iTotal = 0;
		return iReadyMatches;
	}

	/// finalize, perform final sort/cut as needed
	void Finalize ( MatchProcessor_i & tProcessor, bool bCallProcessInResultSetOrder, bool bFinalizeMatches ) final
	{
		KMQ << "finalize";
		if ( !GetLength() )
			return;

		if ( bCallProcessInResultSetOrder )
			m_dIData.Sort ( m_fnComp );

		if ( tProcessor.ProcessInRowIdOrder() )
		{
			CSphFixedVector<int> dSorted ( m_dIData.GetLength() );
			memcpy ( dSorted.Begin(), m_dIData.Begin(), m_dIData.GetLength()*sizeof(m_dIData[0]) );
			// sort by tag, rowid. minimize columnar switches inside expressions and minimize seeks inside columnar iterators
			dSorted.Sort ( Lesser ( [this] ( int l, int r )
			{
				int iTagL = m_dData[l].m_iTag;
				int iTagR = m_dData[r].m_iTag;
				if ( iTagL!=iTagR )
					return iTagL < iTagR;

				return m_dData[l].m_tRowID < m_dData[r].m_tRowID;
			}
			) );

			CSphFixedVector<CSphMatch *> dMatchPtrs ( dSorted.GetLength() );
			ARRAY_FOREACH ( i, dSorted )
				dMatchPtrs[i] = &m_dData[dSorted[i]];

			tProcessor.Process(dMatchPtrs);
		}
		else
		{
			for ( auto iMatch : m_dIData )
				tProcessor.Process ( &m_dData[iMatch] );
		}
	}

	// fixme! test
	ISphMatchSorter * Clone () const final
	{
		auto pClone = new MYTYPE ( m_iSize );
		CloneTo ( pClone );
		return pClone;
	}

	// FIXME! test CSphMatchQueue
	// Move all matches into pRhs without cloning; totals are summed explicitly.
	void MoveTo ( ISphMatchSorter * pRhs, bool bCopyMeta ) final
	{
		KMQ << "moveto";
		//	m_dLogger.Print ();

		auto& dRhs = *(MYTYPE *) pRhs;
		if ( IsEmpty() )
			return; // no matches, nothing to do.

		//	dRhs.m_dLogger.Print ();
		// install into virgin sorter - no need to do something; just swap
		if ( dRhs.IsEmpty() )
		{
			SwapMatchQueueTraits ( dRhs );
			return;
		}

		// work as in non-ordered finalize call, but we not need to
		// clone the matches, may just move them instead.

		// total need special care: just add two values and don't rely
		// on result of moving, since it will be wrong
		auto iTotal = dRhs.m_iTotal;
		for ( auto i : m_dIData )
			dRhs.PushT ( m_dData[i], [] ( CSphMatch & tTrg, CSphMatch & tMatch ) { Swap ( tTrg, tMatch ); } );

		dRhs.m_iTotal = m_iTotal + iTotal;
	}

	void SetMerge ( bool bMerge ) final {}

private:
	InvCompareIndex_fn<COMP> m_fnComp;

	// worst retained match sits at index m_dIData.First()
	CSphMatch * Root() const
	{
		return &m_dData [ m_dIData.First() ];
	}

	/// generic add entry to the queue
	// PUSH places tEntry into a fresh slot (clone or swap); then the slot is
	// sifted up until the inverted-heap invariant holds.
	template <typename MATCH, typename PUSHER>
	bool PushT ( MATCH && tEntry, PUSHER && PUSH )
	{
		++m_iTotal;

		if constexpr ( NOTIFICATIONS )
		{
			m_tJustPushed = RowTagged_t();
			m_dJustPopped.Resize(0);
		}

		if ( Used()==m_iSize )
		{
			// if it's worse that current min, reject it, else pop off current min
			if ( COMP::IsLess ( tEntry, *Root(), m_tState ) )
				return true;
			else
				PopAndProcess_T ( [] ( const CSphMatch & ) { return false; } );
		}

		// do add
		PUSH ( Add(), std::forward<MATCH> ( tEntry ));

		if constexpr ( NOTIFICATIONS )
			m_tJustPushed = RowTagged_t ( *Last() );

		int iEntry = Used()-1;

		// shift up if needed, so that worst (lesser) ones float to the top
		while ( iEntry )
		{
			int iParent = ( iEntry-1 ) / 2;
			if ( !m_fnComp.IsLess ( m_dIData[iParent], m_dIData[iEntry] ) )
				break;

			// entry is less than parent, should float to the top
			Swap ( m_dIData[iEntry], m_dIData[iParent] );
			iEntry = iParent;
		}
		return true;
	}

	/// remove root (ie. top priority) entry
	// fnProcess receives the evicted match; returning false records it into
	// the just-popped notifications. Afterwards the heap is re-sifted down.
	template<typename POPPER>
	void PopAndProcess_T ( POPPER && fnProcess )
	{
		assert ( !IsEmpty() );

		auto& iJustRemoved = m_dIData.Pop();
		if ( !IsEmpty() ) // for empty just popped is the root
			Swap ( m_dIData.First (), iJustRemoved );

		if ( !fnProcess ( m_dData[iJustRemoved] ) )
		{
			// make the last entry my new root
			if constexpr ( NOTIFICATIONS )
			{
				if ( m_dJustPopped.IsEmpty () )
					m_dJustPopped.Add ( RowTagged_t ( m_dData[iJustRemoved] ) );
				else
					m_dJustPopped[0] = RowTagged_t ( m_dData[iJustRemoved] );
			}
		}

		// sift down if needed
		int iEntry = 0;
		auto iUsed = Used();
		while (true)
		{
			// select child
			int iChild = (iEntry*2) + 1;
			if ( iChild>=iUsed )
				break;

			// select smallest child
			if ( iChild+1<iUsed )
				if ( m_fnComp.IsLess ( m_dIData[iChild], m_dIData[iChild+1] ) )
					++iChild;

			// if smallest child is less than entry, do float it to the top
			if ( m_fnComp.IsLess ( m_dIData[iEntry], m_dIData[iChild] ) )
			{
				Swap ( m_dIData[iChild], m_dIData[iEntry] );
				iEntry = iChild;
				continue;
			}
			break;
		}
	}
};
#define LOG_COMPONENT_KBF __LINE__ << " *(" << this << ") "
#define KBF LOC(DIAG,KBF)
//////////////////////////////////////////////////////////////////////////
/// K-buffer (generalized double buffer) sorter
/// faster worst-case but slower average-case than the heap sorter
/// invoked with select ... OPTION sort_method=kbuffer
/// Accumulates up to m_iSize*COEFF matches unsorted; when full, a single
/// quicksort-style partition pass (BinaryPartition) keeps the best m_iSize
/// and frees the rest. m_pWorst enables O(1) early rejection afterwards.
template < typename COMP, bool NOTIFICATIONS >
class CSphKbufferMatchQueue : public CSphMatchQueueTraits
{
	using MYTYPE = CSphKbufferMatchQueue<COMP, NOTIFICATIONS>;
	InvCompareIndex_fn<COMP> m_dComp;

	LOC_ADD;

public:
	/// ctor
	explicit CSphKbufferMatchQueue ( int iSize )
		: CSphMatchQueueTraits ( iSize*COEFF )	// storage holds COEFF times the requested limit
		, m_dComp ( *this )
	{
		m_iSize /= COEFF;	// ...but the advertised size stays at the limit
		if constexpr ( NOTIFICATIONS )
			m_dJustPopped.Reserve ( m_iSize*(COEFF-1) );
	}

	bool IsGroupby () const final { return false; }
	int GetLength () final { return Min ( Used(), m_iSize ); }

	bool Push ( const CSphMatch & tEntry ) override { return PushT ( tEntry, [this] ( CSphMatch & tTrg, const CSphMatch & tMatch ) { m_pSchema->CloneMatch ( tTrg, tMatch ); }); }

	// batch push; INVALID_ROWID entries only bump the total counter
	void Push ( const VecTraits_T<const CSphMatch> & dMatches ) override
	{
		for ( const auto & i : dMatches )
			if ( i.m_tRowID!=INVALID_ROWID )
				PushT ( i, [this] ( CSphMatch & tTrg, const CSphMatch & tMatch ) { m_pSchema->CloneMatch ( tTrg, tMatch ); } );
			else
				m_iTotal++;
	}

	bool PushGrouped ( const CSphMatch &, bool ) final { assert(0); return false; }

	/// store all entries into specified location in sorted order, and remove them from queue
	int Flatten ( CSphMatch * pTo ) final
	{
		KBF << "Flatten";
		FinalizeMatches ();
		auto iReadyMatches = Used();

		for ( auto iMatch : m_dIData )
		{
			KBF << "fltn " << m_dData[iMatch].m_iTag << ":" << m_dData[iMatch].m_tRowID;
			Swap ( *pTo, m_dData[iMatch] );
			++pTo;
		}

		m_iMaxUsed = ResetDynamic ( m_iMaxUsed );

		// clean up for the next work session
		m_pWorst = nullptr;
		m_iTotal = 0;
		m_bFinalized = false;
		m_dIData.Resize(0);

		return iReadyMatches;
	}

	/// finalize, perform final sort/cut as needed
	void Finalize ( MatchProcessor_i & tProcessor, bool, bool bFinalizeMatches ) final
	{
		KBF << "Finalize";
		if ( IsEmpty() )
			return;

		if ( bFinalizeMatches )
			FinalizeMatches();

		for ( auto iMatch : m_dIData )
			tProcessor.Process ( &m_dData[iMatch] );
	}

	ISphMatchSorter* Clone() const final
	{
		auto pClone = new MYTYPE ( m_iSize );
		CloneTo ( pClone );
		return pClone;
	}

	// FIXME! test CSphKbufferMatchQueue
	// FIXME! need to deal with justpushed/justpopped any other way!
	// Move all matches into pRhs without cloning; totals are summed explicitly.
	void MoveTo ( ISphMatchSorter * pRhs, bool bCopyMeta ) final
	{
		auto& dRhs = *(CSphKbufferMatchQueue<COMP, NOTIFICATIONS>*) pRhs;
		if ( IsEmpty () )
			return;

		if ( dRhs.IsEmpty () )
		{
			SwapMatchQueueTraits (dRhs);
			dRhs.m_pWorst = m_pWorst;
			dRhs.m_bFinalized = m_bFinalized;
			return;
		}

		FinalizeMatches();

		// both are non-empty - need to process.
		// work as finalize call, but don't clone the matches; move them instead.
		// total need special care!
		auto iTotal = dRhs.m_iTotal;
		for ( auto iMatch : m_dIData )
		{
			dRhs.PushT ( m_dData[iMatch],
			[] ( CSphMatch & tTrg, CSphMatch & tMatch ) {
				Swap ( tTrg, tMatch );
			});
		}

		dRhs.m_iTotal = m_iTotal + iTotal;
	}

	void SetMerge ( bool bMerge ) final {}

protected:
	CSphMatch *			m_pWorst = nullptr;		// worst retained match once initial sort happened
	bool				m_bFinalized = false;	// true when m_dIData is sorted & cut to m_iSize
	int					m_iMaxUsed = -1;		// high-water mark of slot usage, for dynamic cleanup

	static const int COEFF = 4;

private:
	void SortMatches () // sort from best to worst
	{
		m_dIData.Sort ( m_dComp );
	}

	void FreeMatch ( int iMatch )
	{
		if constexpr ( NOTIFICATIONS )
			m_dJustPopped.Add ( RowTagged_t ( m_dData[iMatch] ) );
		m_pSchema->FreeDataPtrs ( m_dData[iMatch] );
	}

	// dispose everything beyond the first m_iSize index entries
	void CutTail()
	{
		if ( Used()<=m_iSize)
			return;

		m_iMaxUsed = Max ( m_iMaxUsed, this->m_dIData.GetLength () ); // memorize it for free dynamics later.
		m_dIData.Slice ( m_iSize ).Apply ( [this] ( int iMatch ) { FreeMatch ( iMatch ); } );
		m_dIData.Resize ( m_iSize );
	}

	// conception: we have array of N*COEFF elems.
	// We need only N the best elements from it (rest have to be disposed).
	// direct way: rsort, then take first N elems.
	// this way: rearrange array by performing one pass of quick sort
	// if we have exactly N elems left hand from pivot - we're done.
	// otherwise repeat rearranging only to right or left part until the target achieved.
	void BinaryPartition ()
	{
		int iPivot = m_dIData[m_iSize / COEFF+1];
		int iMaxIndex = m_iSize-1;
		int a=0;
		int b=Used()-1;
		while (true)
		{
			int i=a;
			int j=b;
			while (i<=j)
			{
				while (m_dComp.IsLess (m_dIData[i],iPivot)) ++i;
				while (m_dComp.IsLess (iPivot, m_dIData[j])) --j;
				if ( i<=j ) ::Swap( m_dIData[i++], m_dIData[j--]);
			}
			if ( iMaxIndex == j )
				break;

			if ( iMaxIndex < j)
				b = j; // too many elems acquired; continue with left part
			else
				a = i; // too less elems acquired; continue with right part
			iPivot = m_dIData[( a * ( COEFF-1 )+b ) / COEFF];
		}
	}

	// partition to keep the best m_iSize entries, then free the tail
	void RepartitionMatches ()
	{
		assert ( Used ()>m_iSize );
		BinaryPartition ();
		CutTail();
	}

	// idempotent: partition+sort+cut, executed at most once per fill cycle
	void FinalizeMatches ()
	{
		if ( m_bFinalized )
			return;

		m_bFinalized = true;

		if ( Used ()>m_iSize )
			RepartitionMatches();

		SortMatches();
	}

	// generic push entry (add it some way to the queue clone or swap PUSHER depends on)
	template<typename MATCH, typename PUSHER>
	FORCE_INLINE bool PushT ( MATCH && tEntry, PUSHER && PUSH )
	{
		if constexpr ( NOTIFICATIONS )
		{
			m_tJustPushed = RowTagged_t();
			m_dJustPopped.Resize(0);
		}

		// quick early rejection checks
		++m_iTotal;
		if ( m_pWorst && COMP::IsLess ( tEntry, *m_pWorst, m_tState ) )
			return true;

		// quick check passed
		// fill the data, back to front
		m_bFinalized = false;
		PUSH ( Add(), std::forward<MATCH> ( tEntry ));

		if constexpr ( NOTIFICATIONS )
			m_tJustPushed = RowTagged_t ( *Last() );

		// do the initial sort once
		if ( m_iTotal==m_iSize )
		{
			assert ( Used()==m_iSize && !m_pWorst );
			SortMatches();
			m_pWorst = Last();
			m_bFinalized = true;
			return true;
		}

		if ( Used ()<m_iSize*COEFF )
			return true;

		// do the sort/cut when the K-buffer is full
		assert ( Used ()==m_iSize*COEFF );
		RepartitionMatches();
		SortMatches ();
		m_pWorst = Last ();
		m_bFinalized = true;
		return true;
	}
};
//////////////////////////////////////////////////////////////////////////
/// collect list of matched DOCIDs in aside compressed blob
/// (mainly used to collect docs in `DELETE... WHERE` statement)
/// Not a real sorter: GetLength() is always 0 (so Flatten is never called),
/// matches are accumulated as docids and delta-zip-encoded into the writer.
class CollectQueue_c final : public MatchSorter_c, ISphNoncopyable
{
	using BASE = MatchSorter_c;

public:
			CollectQueue_c ( int iSize, CSphVector<BYTE>& dCollectedValues );

	bool	IsGroupby () const final { return false; }
	int		GetLength () final { return 0; } // that ensures, flatten() will never called;
	bool	Push ( const CSphMatch& tEntry ) final { return PushMatch(tEntry); }
	// batch push; INVALID_ROWID entries are skipped entirely
	void	Push ( const VecTraits_T<const CSphMatch> & dMatches ) final
	{
		for ( const auto & i : dMatches )
			if ( i.m_tRowID!=INVALID_ROWID )
				PushMatch(i);
	}
	bool	PushGrouped ( const CSphMatch &, bool ) final { assert(0); return false; }
	int		Flatten ( CSphMatch * ) final { return 0; }
	void	Finalize ( MatchProcessor_i &, bool, bool ) final;
	bool	CanBeCloned() const final { return false; }
	ISphMatchSorter * Clone () const final { return nullptr; }
	void	MoveTo ( ISphMatchSorter *, bool ) final {}
	void	SetSchema ( ISphSchema * pSchema, bool bRemapCmp ) final;
	bool	IsCutoffDisabled() const final { return true; }
	void	SetMerge ( bool bMerge ) final {}

private:
	DocID_t					m_iLastID;			// last docid written, base of the next delta
	int						m_iMaxMatches;		// batch size before an intermediate flush
	CSphVector<DocID_t>		m_dUnsortedDocs;	// pending docids, uniq-ed on flush
	MemoryWriter_c			m_tWriter;			// zip-encodes deltas into the caller's blob
	bool					m_bDocIdDynamic = false;	// whether docid lives in the dynamic part of a match

	inline bool	PushMatch ( const CSphMatch & tEntry );
	inline void	ProcessPushed();
};
// iSize caps the in-memory batch; dCollectedValues receives the zipped deltas.
CollectQueue_c::CollectQueue_c ( int iSize, CSphVector<BYTE>& dCollectedValues )
	: m_iLastID ( 0 )
	, m_iMaxMatches ( iSize )
	, m_tWriter ( dCollectedValues )
{}

/// sort/uniq already collected and store them to writer
// Docids are emitted as zipped deltas against the previous one (m_iLastID),
// which is why they must be sorted (Uniq() sorts) before writing.
void CollectQueue_c::ProcessPushed()
{
	m_dUnsortedDocs.Uniq();
	for ( auto& iCurId : m_dUnsortedDocs )
		m_tWriter.ZipOffset ( iCurId - std::exchange ( m_iLastID, iCurId ) );
	m_dUnsortedDocs.Resize ( 0 );
}

// Queue one docid; flush the batch when it is both at capacity and about to
// reallocate (GetLimit check avoids flushing mid-growth).
bool CollectQueue_c::PushMatch ( const CSphMatch & tEntry )
{
	if ( m_dUnsortedDocs.GetLength() >= m_iMaxMatches && m_dUnsortedDocs.GetLength() == m_dUnsortedDocs.GetLimit() )
		ProcessPushed();

	m_dUnsortedDocs.Add ( sphGetDocID ( m_bDocIdDynamic ? tEntry.m_pDynamic : tEntry.m_pStatic ) );
	return true;
}

/// final update pass
// Flush whatever remains and reset the delta base for a potential reuse.
void CollectQueue_c::Finalize ( MatchProcessor_i&, bool, bool )
{
	ProcessPushed();
	m_iLastID = 0;
}

// Cache where the docid attribute lives (static vs dynamic row part).
void CollectQueue_c::SetSchema ( ISphSchema * pSchema, bool bRemapCmp )
{
	BASE::SetSchema ( pSchema, bRemapCmp );

	const CSphColumnInfo * pDocId = pSchema->GetAttr ( sphGetDocidName() );
	assert(pDocId);
	m_bDocIdDynamic = pDocId->m_tLocator.m_bDynamic;
}

// Factory used by the DELETE...WHERE pipeline.
ISphMatchSorter * CreateCollectQueue ( int iMaxMatches, CSphVector<BYTE> & tCollection )
{
	return new CollectQueue_c ( iMaxMatches, tCollection );
}
//////////////////////////////////////////////////////////////////////////
// Stream the result-set header (column names/types) to the SQL client.
// dOrder maps output positions to schema attribute indexes; internal
// attributes and token-count columns are skipped. Position 0 is always the
// document id, forced to UINT64 on the wire.
void SendSqlSchema ( const ISphSchema& tSchema, RowBuffer_i* pRows, const VecTraits_T<int>& dOrder )
{
	pRows->HeadBegin ();
	ARRAY_CONSTFOREACH ( i, dOrder )
	{
		const CSphColumnInfo& tCol = tSchema.GetAttr ( dOrder[i] );
		if ( sphIsInternalAttr ( tCol ) )
			continue;
		if ( i == 0 )
		{
			assert (tCol.m_sName == "id");
			pRows->HeadColumn ( "id", ESphAttr2MysqlColumnStreamed ( SPH_ATTR_UINT64 ) );
			continue;
		}
		if ( tCol.m_eAttrType==SPH_ATTR_TOKENCOUNT )
			continue;
		pRows->HeadColumn ( tCol.m_sName.cstr(), ESphAttr2MysqlColumnStreamed ( tCol.m_eAttrType ) );
	}
	pRows->HeadEnd ( false, 0 );
}
// Builder producing SQL-escaped (quote-safe) text.
using SqlEscapedBuilder_c = EscapedStringBuilder_T<BaseQuotation_T<SqlQuotator_t>>;

// Stream one match as a result row, attribute by attribute, in dOrder order.
// Internal and token-count attributes are skipped; position 0 (docid) is
// forced to UINT64. On a failed Commit() the session is marked killed.
void SendSqlMatch ( const ISphSchema& tSchema, RowBuffer_i* pRows, CSphMatch& tMatch, const BYTE* pBlobPool, const VecTraits_T<int>& dOrder, bool bDynamicDocid )
{
	auto& dRows = *pRows;
	ARRAY_CONSTFOREACH ( i, dOrder )
	{
		const CSphColumnInfo& dAttr = tSchema.GetAttr ( dOrder[i] );
		if ( sphIsInternalAttr ( dAttr ) )
			continue;
		if ( dAttr.m_eAttrType==SPH_ATTR_TOKENCOUNT )
			continue;

		CSphAttrLocator tLoc = dAttr.m_tLocator;
		ESphAttr eAttrType = dAttr.m_eAttrType;
		if ( i == 0 )
			eAttrType = SPH_ATTR_UINT64;	// the docid column

		switch ( eAttrType )
		{
		case SPH_ATTR_STRING:
			dRows.PutArray ( sphGetBlobAttr ( tMatch, tLoc, pBlobPool ) );
			break;
		case SPH_ATTR_STRINGPTR:
			{
				const BYTE* pStr = nullptr;
				if ( dAttr.m_eStage == SPH_EVAL_POSTLIMIT )
				{
					// post-limit strings (e.g. docstore fields) are evaluated on demand
					if ( bDynamicDocid )
					{
						dAttr.m_pExpr->StringEval ( tMatch, &pStr );
					} else
					{
						// NOTE(review): temporarily hides the dynamic row so the
						// expression reads the static docid — confirm against
						// StringEval's docid lookup.
						auto pDynamic = tMatch.m_pDynamic;
						if ( tMatch.m_pStatic )
							tMatch.m_pDynamic = nullptr;
						dAttr.m_pExpr->StringEval ( tMatch, &pStr );
						tMatch.m_pDynamic = pDynamic;
					}
					dRows.PutString ( (const char*)pStr );
					SafeDeleteArray ( pStr );
				} else {
					pStr = (const BYTE*)tMatch.GetAttr ( tLoc );
					auto dString = sphUnpackPtrAttr ( pStr );
					dRows.PutArray ( dString );
				}
			}
			break;
		case SPH_ATTR_INTEGER:
		case SPH_ATTR_TIMESTAMP:
		case SPH_ATTR_BOOL:
			dRows.PutNumAsString ( (DWORD)tMatch.GetAttr ( tLoc ) );
			break;
		case SPH_ATTR_BIGINT:
			dRows.PutNumAsString ( tMatch.GetAttr ( tLoc ) );
			break;
		case SPH_ATTR_UINT64:
			dRows.PutNumAsString ( (uint64_t)tMatch.GetAttr ( tLoc ) );
			break;
		case SPH_ATTR_FLOAT:
			dRows.PutFloatAsString ( tMatch.GetAttrFloat ( tLoc ) );
			break;
		case SPH_ATTR_DOUBLE:
			dRows.PutDoubleAsString ( tMatch.GetAttrDouble ( tLoc ) );
			break;
		// MVAs and float vectors are rendered as a parenthesized value list
		case SPH_ATTR_INT64SET:
		case SPH_ATTR_UINT32SET:
			{
				StringBuilder_c dStr;
				auto dMVA = sphGetBlobAttr ( tMatch, tLoc, pBlobPool );
				dStr << "(";
				sphMVA2Str ( dMVA, eAttrType == SPH_ATTR_INT64SET, dStr );
				dStr << ")";
				dRows.PutArray ( dStr, false );
				break;
			}
		case SPH_ATTR_INT64SET_PTR:
		case SPH_ATTR_UINT32SET_PTR:
			{
				StringBuilder_c dStr;
				dStr << "(";
				sphPackedMVA2Str ( (const BYTE*)tMatch.GetAttr ( tLoc ), eAttrType == SPH_ATTR_INT64SET_PTR, dStr );
				dStr << ")";
				dRows.PutArray ( dStr, false );
				break;
			}
		case SPH_ATTR_FLOAT_VECTOR:
			{
				StringBuilder_c dStr;
				auto dFloatVec = sphGetBlobAttr ( tMatch, tLoc, pBlobPool );
				dStr << "(";
				sphFloatVec2Str ( dFloatVec, dStr );
				dStr << ")";
				dRows.PutArray ( dStr, false );
			}
			break;
		case SPH_ATTR_FLOAT_VECTOR_PTR:
			{
				StringBuilder_c dStr;
				dStr << "(";
				sphPackedFloatVec2Str ( (const BYTE*)tMatch.GetAttr(tLoc), dStr );
				dStr << ")";
				dRows.PutArray ( dStr, false );
			}
			break;
		// JSON values are formatted, then SQL-escaped without surrounding quotes
		case SPH_ATTR_JSON:
			{
				auto pJson = sphGetBlobAttr ( tMatch, tLoc, pBlobPool );
				JsonEscapedBuilder sTmp;
				if ( pJson.second )
					sphJsonFormat ( sTmp, pJson.first );
				auto sJson = Str_t(sTmp);
				SqlEscapedBuilder_c dEscaped;
				dEscaped.FixupSpacedAndAppendEscapedNoQuotes ( sJson.first, sJson.second );
				dRows.PutArray ( dEscaped, false );
			}
			break;
		case SPH_ATTR_JSON_PTR:
			{
				auto* pString = (const BYTE*)tMatch.GetAttr ( tLoc );
				JsonEscapedBuilder sTmp;
				if ( pString )
				{
					auto dJson = sphUnpackPtrAttr ( pString );
					sphJsonFormat ( sTmp, dJson.first );
				}
				auto sJson = Str_t ( sTmp );
				SqlEscapedBuilder_c dEscaped;
				dEscaped.FixupSpacedAndAppendEscapedNoQuotes ( sJson.first, sJson.second );
				dRows.PutArray ( dEscaped, false );
			}
			break;
		case SPH_ATTR_FACTORS:
		case SPH_ATTR_FACTORS_JSON:
		case SPH_ATTR_JSON_FIELD:
		case SPH_ATTR_JSON_FIELD_PTR:
			assert ( false ); // index schema never contain such column
			break;
		default:
			// unknown attribute type: emit a one-byte '-' placeholder
			dRows.Add ( 1 );
			dRows.Add ( '-' );
			break;
		}
	}
	if ( !dRows.Commit() )
		session::SetKilled ( true );
}
/// stream out matches
/// Pseudo-sorter that streams every pushed match straight to the SQL client
/// (GetLength() is 0, so Flatten() is never called). The schema header is sent
/// lazily on the first push; docstore/final expressions are rebound as the
/// blob pool / columnar storage / docstore readers change between segments.
class DirectSqlQueue_c final : public MatchSorter_c, ISphNoncopyable
{
	using BASE = MatchSorter_c;

public:
			DirectSqlQueue_c ( RowBuffer_i * pOutput, void ** ppOpaque1, void ** ppOpaque2, StrVec_t dColumns );
			~DirectSqlQueue_c() override;

	bool	IsGroupby () const final { return false; }
	int		GetLength () final { return 0; } // that ensures, flatten() will never called;
	bool	Push ( const CSphMatch& tEntry ) final { return PushMatch(const_cast<CSphMatch&>(tEntry)); }
	// batch push; INVALID_ROWID entries are skipped entirely
	void	Push ( const VecTraits_T<const CSphMatch> & dMatches ) final
	{
		for ( const auto & i : dMatches )
			if ( i.m_tRowID!=INVALID_ROWID )
				PushMatch(const_cast<CSphMatch&>(i));
	}
	bool	PushGrouped ( const CSphMatch &, bool ) final { assert(0); return false; }
	int		Flatten ( CSphMatch * ) final { return 0; }
	void	Finalize ( MatchProcessor_i &, bool, bool ) final;
	bool	CanBeCloned() const final { return false; }
	ISphMatchSorter * Clone () const final { return nullptr; }
	void	MoveTo ( ISphMatchSorter *, bool ) final {}
	void	SetSchema ( ISphSchema * pSchema, bool bRemapCmp ) final;
	bool	IsCutoffDisabled() const final { return true; }
	void	SetMerge ( bool bMerge ) final {}
	void	SetBlobPool ( const BYTE* pBlobPool ) final
	{
		m_pBlobPool = pBlobPool;
		MakeCtx();
	}

	void	SetColumnar ( columnar::Columnar_i* pColumnar ) final
	{
		m_pColumnar = pColumnar;
		MakeCtx();
	}

private:
	bool	m_bSchemaSent = false;		// header already streamed?
	int64_t m_iDocs = 0;				// number of matches streamed so far
	RowBuffer_i* m_pOutput;				// destination SQL row buffer
	const BYTE* m_pBlobPool = nullptr;
	columnar::Columnar_i* m_pColumnar = nullptr;
	CSphVector<ISphExpr*> m_dDocstores;	// POSTLIMIT expressions needing a docstore session
	CSphVector<ISphExpr*> m_dFinals;	// FINAL-stage expressions
	void ** m_ppOpaque1 = nullptr;		// indirect slot with the current docstore reader
	void ** m_ppOpaque2 = nullptr;		// indirect slot with the current docstore
	void * m_pCurDocstore = nullptr;
	void * m_pCurDocstoreReader = nullptr;
	CSphQuery m_dFake;					// empty query backing m_dCtx
	CSphQueryContext m_dCtx;
	StrVec_t m_dColumns;				// requested output column names, in order
	CSphVector<int> m_dOrder;			// resolved attr indexes matching m_dColumns
	// FIX: was uninitialized; it is only assigned in SendSchemaOnce() when a
	// docid column is present, yet read on every streamed row via SendSqlMatch().
	bool m_bDynamicDocid = false;
	bool m_bNotYetFinalized = true;

	inline bool	PushMatch ( CSphMatch & tEntry );
	void SendSchemaOnce();
	void FinalizeOnce();
	void MakeCtx();
};
/// ctor; stores the output buffer and the two opaque docstore slots.
/// dColumns is taken by value and moved in (list of columns to stream, in order).
DirectSqlQueue_c::DirectSqlQueue_c ( RowBuffer_i * pOutput, void ** ppOpaque1, void ** ppOpaque2, StrVec_t dColumns )
	: m_pOutput ( pOutput )
	, m_ppOpaque1 ( ppOpaque1 )
	, m_ppOpaque2 ( ppOpaque2 )
	, m_dCtx (m_dFake)	// context over the dummy query; real setup happens in MakeCtx()
	, m_dColumns ( std::move ( dColumns ) )
{}
/// dtor; makes sure the header and Eof() were sent even if Finalize() was never called
DirectSqlQueue_c::~DirectSqlQueue_c()
{
	FinalizeOnce();
}
/// lazily emit the SQL result-set header (idempotent).
/// Also resolves the requested output columns to schema attr indexes and splits
/// attached expressions by evaluation stage for later docstore session plumbing.
void DirectSqlQueue_c::SendSchemaOnce()
{
	if ( m_bSchemaSent )
		return;

	assert ( !m_iDocs ); // header must go out before any row is streamed

	// map requested output columns to schema attr indexes; unknown names are silently skipped
	for ( const auto& sColumn : m_dColumns )
	{
		auto iIdx = m_pSchema->GetAttrIndex ( sColumn.cstr() );
		if ( iIdx >= 0 )
			m_dOrder.Add ( iIdx );
	}

	for ( int i = 0; i < m_pSchema->GetAttrsCount(); ++i )
	{
		auto& tCol = const_cast< CSphColumnInfo &>(m_pSchema->GetAttr ( i ));
		if ( tCol.m_sName == sphGetDocidName() )
			m_bDynamicDocid = tCol.m_tLocator.m_bDynamic;
		if ( !tCol.m_pExpr )
			continue;
		// collect expressions by stage; PushMatch() later feeds them docstore sessions
		switch ( tCol.m_eStage )
		{
		case SPH_EVAL_FINAL : m_dFinals.Add ( tCol.m_pExpr ); break;
		case SPH_EVAL_POSTLIMIT: m_dDocstores.Add ( tCol.m_pExpr ); break;
		default:
			sphWarning ("Unknown stage in SendSchemaOnce(): %d", tCol.m_eStage); // message fixed: used to name non-existent SendSchema()
		}
	}
	SendSqlSchema ( *m_pSchema, m_pOutput, m_dOrder );
	m_bSchemaSent = true;
}
/// (re)build the calc context whenever blob pool or columnar storage changes.
/// NOTE(review): tFakeMeta/tFakeSchemas are stack locals passed to SetupCalc;
/// assumes SetupCalc copies what it needs rather than keeping references - confirm.
void DirectSqlQueue_c::MakeCtx()
{
	CSphQueryResultMeta tFakeMeta;
	CSphVector<const ISphSchema*> tFakeSchemas;
	m_dCtx.SetupCalc ( tFakeMeta, *m_pSchema, *m_pSchema, m_pBlobPool, m_pColumnar, tFakeSchemas );
}
/// stream out one match: make sure the header went out, refresh docstore
/// sessions if the underlying (chunk-local) docstores changed, evaluate
/// final-stage expressions, and serialize the row into the output buffer.
bool DirectSqlQueue_c::PushMatch ( CSphMatch & tEntry )
{
	SendSchemaOnce();
	++m_iDocs;
	// opaque slot 1: docid-keyed docstore reader for postlimit expressions
	if ( m_ppOpaque1 )
	{
		auto pDocstoreReader = *m_ppOpaque1;
		// std::exchange: update the cached value and re-issue sessions only on change
		if ( pDocstoreReader!=std::exchange (m_pCurDocstore, pDocstoreReader) && pDocstoreReader )
		{
			DocstoreSession_c::InfoDocID_t tSessionInfo;
			tSessionInfo.m_pDocstore = (const DocstoreReader_i *)pDocstoreReader;
			tSessionInfo.m_iSessionId = -1;
			// value is copied; no leak of pointer to local here.
			m_dDocstores.for_each ( [&tSessionInfo] ( ISphExpr* pExpr ) { pExpr->Command ( SPH_EXPR_SET_DOCSTORE_DOCID, &tSessionInfo ); } );
		}
	}
	// opaque slot 2: rowid-keyed docstore for final-stage expressions
	if ( m_ppOpaque2 )
	{
		auto pDocstore = *m_ppOpaque2;
		if ( pDocstore != std::exchange ( m_pCurDocstoreReader, pDocstore ) && pDocstore )
		{
			DocstoreSession_c::InfoRowID_t tSessionInfo;
			tSessionInfo.m_pDocstore = (Docstore_i*)pDocstore;
			tSessionInfo.m_iSessionId = -1;
			// value is copied; no leak of pointer to local here.
			m_dFinals.for_each ( [&tSessionInfo] ( ISphExpr* pExpr ) { pExpr->Command ( SPH_EXPR_SET_DOCSTORE_ROWID, &tSessionInfo ); } );
		}
	}
	m_dCtx.CalcFinal(tEntry);	// evaluate final-stage columns before serialization
	SendSqlMatch ( *m_pSchema, m_pOutput, tEntry, m_pBlobPool, m_dOrder, m_bDynamicDocid );
	return true;
}
/// final pass over the streamed result; the output stream is closed only when
/// the caller indicates that matches are actually being finalized
void DirectSqlQueue_c::Finalize ( MatchProcessor_i&, bool, bool bFinalizeMatches )
{
	if ( bFinalizeMatches )
		FinalizeOnce();
}
/// close the output stream exactly once (guarded by std::exchange on the flag).
/// Also sends the header first, so an empty result still produces a valid packet.
void DirectSqlQueue_c::FinalizeOnce ()
{
	if ( !std::exchange ( m_bNotYetFinalized, false ) )
		return;
	SendSchemaOnce();
	m_pOutput->Eof();
}
/// plain pass-through to the base sorter's schema setter
void DirectSqlQueue_c::SetSchema ( ISphSchema * pSchema, bool bRemapCmp )
{
	BASE::SetSchema ( pSchema, bRemapCmp );
}
/// factory for the direct-streaming SQL sorter; caller owns the returned sorter
ISphMatchSorter * CreateDirectSqlQueue ( RowBuffer_i * pOutput, void ** ppOpaque1, void ** ppOpaque2, const StrVec_t & dColumns )
{
	return new DirectSqlQueue_c ( pOutput, ppOpaque1, ppOpaque2, dColumns );
}
//////////////////////////////////////////////////////////////////////////
// SORT CLAUSE PARSER
//////////////////////////////////////////////////////////////////////////
/// tokenizer for sort/group clause strings ("attr1 desc, attr2 asc, ...").
/// The ctor lowercases the clause in-place into an owned buffer, replacing
/// separator characters with '\0'; GetToken() then walks the NUL-separated tokens.
class SortClauseTokenizer_t
{
protected:
	const char * m_pCur;	// current scan position inside m_pBuf
	const char * m_pMax;	// end of buffer
	char * m_pBuf;			// owned, preprocessed copy of the clause
protected:
	/// lowercase valid token chars, map everything else to '\0' (token separator)
	char ToLower ( char c )
	{
		// 0..9, A..Z->a..z, _, a..z, @, .
		if ( ( c>='0' && c<='9' ) || ( c>='a' && c<='z' ) || c=='_' || c=='@' || c=='.' || c=='[' || c==']' || c=='\'' || c=='\"' || c=='(' || c==')' || c=='*' )
			return c;
		if ( c>='A' && c<='Z' )
			return c-'A'+'a';
		return 0;
	}
public:
	explicit SortClauseTokenizer_t ( const char * sBuffer )
	{
		auto iLen = (int) strlen(sBuffer);
		m_pBuf = new char [ iLen+1 ];
		m_pMax = m_pBuf+iLen;
		m_pCur = m_pBuf;
		// make string lowercase but keep case of JSON.field
		bool bJson = false;
		for ( int i=0; i<=iLen; i++ )
		{
			char cSrc = sBuffer[i];
			char cDst = ToLower ( cSrc );
			bJson = ( cSrc=='.' || cSrc=='[' || ( bJson && cDst>0 ) ); // keep case of valid char sequence after '.' and '[' symbols
			m_pBuf[i] = bJson ? cSrc : cDst;
		}
	}
	~SortClauseTokenizer_t ()
	{
		SafeDeleteArray ( m_pBuf );
	}
	/// next NUL-separated token, or nullptr when the clause is exhausted
	const char * GetToken ()
	{
		// skip spaces
		while ( m_pCur<m_pMax && !*m_pCur )
			m_pCur++;
		if ( m_pCur>=m_pMax )
			return nullptr;
		// memorize token start, and move pointer forward
		const char * sRes = m_pCur;
		while ( *m_pCur )
			m_pCur++;
		return sRes;
	}
	/// detect a "count(*)" tail starting at sTok (whitespace between parts allowed);
	/// on success, advances the internal cursor past the composite token
	bool IsSparseCount ( const char * sTok )
	{
		const char * sSeq = "(*)";
		for ( ; sTok<m_pMax && *sSeq; sTok++ )
		{
			bool bGotSeq = ( *sSeq==*sTok );
			if ( bGotSeq )
				sSeq++;
			// stop checking on any non-space char outside sequence or sequence end
			if ( ( !bGotSeq && !sphIsSpace ( *sTok ) && *sTok!='\0' ) || !*sSeq )
				break;
		}
		if ( !*sSeq && sTok+1<m_pMax && !sTok[1] )
		{
			// advance token iterator after composite count(*) token
			m_pCur = sTok+1;
			return true;
		} else
		{
			return false;
		}
	}
};
//////////////////////////////////////////////////////////////////////////
// SORTING+GROUPING INSTANTIATION
//////////////////////////////////////////////////////////////////////////
/// instantiate a grouping sorter: pick a match comparator for the requested sort
/// function, then delegate to the group-sorter factory.
/// With implicit grouping (m_bImplicit) no comparator is needed, so pComp stays null.
ISphMatchSorter * CreateSorter ( ESphSortFunc eMatchFunc, ESphSortFunc eGroupFunc, const CSphQuery * pQuery, const CSphGroupSorterSettings & tSettings, bool bHasPackedFactors, bool bHasAggregates, const PrecalculatedSorterResults_t & tPrecalc )
{
	CSphRefcountedPtr<ISphMatchComparator> pComp;
	if ( !tSettings.m_bImplicit )
		switch ( eMatchFunc )
		{
			case FUNC_REL_DESC:	pComp = new MatchRelevanceLt_fn(); break;
			case FUNC_TIMESEGS:	pComp = new MatchTimeSegments_fn(); break;
			case FUNC_GENERIC1:	pComp = new MatchGeneric1_fn(); break;
			case FUNC_GENERIC2:	pComp = new MatchGeneric2_fn(); break;
			case FUNC_GENERIC3:	pComp = new MatchGeneric3_fn(); break;
			case FUNC_GENERIC4:	pComp = new MatchGeneric4_fn(); break;
			case FUNC_GENERIC5:	pComp = new MatchGeneric5_fn(); break;
			case FUNC_EXPR:		pComp = new MatchExpr_fn(); break; // only for non-bitfields, obviously
		}
	return CreateGroupSorter ( eGroupFunc, pComp, pQuery, tSettings, bHasPackedFactors, bHasAggregates, tPrecalc );
}
/////////////////////////
// SORTING QUEUE FACTORY
/////////////////////////
/// pick one of the four concrete queue flavors:
/// (k-buffer vs plain match queue) x (with vs without packed factors)
template < typename COMP >
static ISphMatchSorter * CreatePlainSorter ( bool bKbuffer, int iMaxMatches, bool bFactors )
{
	if ( bKbuffer )
		return bFactors
			? static_cast<ISphMatchSorter *> ( new CSphKbufferMatchQueue<COMP, true> ( iMaxMatches ) )
			: static_cast<ISphMatchSorter *> ( new CSphKbufferMatchQueue<COMP, false> ( iMaxMatches ) );

	return bFactors
		? static_cast<ISphMatchSorter *> ( new CSphMatchQueue<COMP, true> ( iMaxMatches ) )
		: static_cast<ISphMatchSorter *> ( new CSphMatchQueue<COMP, false> ( iMaxMatches ) );
}
/// non-grouping sorter factory: dispatch the sort function to the templated
/// queue creator above; returns nullptr for unknown sort functions
ISphMatchSorter * CreatePlainSorter ( ESphSortFunc eMatchFunc, bool bKbuffer, int iMaxMatches, bool bFactors )
{
	switch ( eMatchFunc )
	{
		case FUNC_REL_DESC:	return CreatePlainSorter<MatchRelevanceLt_fn>	( bKbuffer, iMaxMatches, bFactors );
		case FUNC_TIMESEGS:	return CreatePlainSorter<MatchTimeSegments_fn>	( bKbuffer, iMaxMatches, bFactors );
		case FUNC_GENERIC1:	return CreatePlainSorter<MatchGeneric1_fn>		( bKbuffer, iMaxMatches, bFactors );
		case FUNC_GENERIC2:	return CreatePlainSorter<MatchGeneric2_fn>		( bKbuffer, iMaxMatches, bFactors );
		case FUNC_GENERIC3:	return CreatePlainSorter<MatchGeneric3_fn>		( bKbuffer, iMaxMatches, bFactors );
		case FUNC_GENERIC4:	return CreatePlainSorter<MatchGeneric4_fn>		( bKbuffer, iMaxMatches, bFactors );
		case FUNC_GENERIC5:	return CreatePlainSorter<MatchGeneric5_fn>		( bKbuffer, iMaxMatches, bFactors );
		case FUNC_EXPR:		return CreatePlainSorter<MatchExpr_fn>			( bKbuffer, iMaxMatches, bFactors );
		default:			return nullptr;
	}
}
/// compute the effective cutoff (max matches to collect) for a query.
/// Returns a positive cutoff, or -1 meaning "no cutoff".
/// The check order matters: precalc > explicit cutoff > reasons to disable > implicit limit.
int ApplyImplicitCutoff ( const CSphQuery & tQuery, const VecTraits_T<ISphMatchSorter*> & dSorters, bool bFT )
{
	bool bAllPrecalc = dSorters.GetLength() && dSorters.all_of ( []( auto pSorter ){ return pSorter->IsPrecalc(); } );
	if ( bAllPrecalc )
		return 1; // only need one match for precalc sorters

	if ( tQuery.m_iCutoff>0 )
		return tQuery.m_iCutoff;

	// cutoff of exactly 0 means "explicitly disabled"
	if ( !tQuery.m_iCutoff )
		return -1;

	// this is the same as checking the sorters for disabled cutoff
	// but this works when sorters are not yet available (e.g. GetPseudoShardingMetric())
	if ( HasImplicitGrouping ( tQuery ) )
		return -1;

	// KNN must see all candidates
	if ( !tQuery.m_sKNNAttr.IsEmpty() )
		return -1;

	bool bDisableCutoff = dSorters.any_of ( []( auto * pSorter ){ return pSorter->IsCutoffDisabled(); } );
	if ( bDisableCutoff )
		return -1;

	// implicit cutoff when there's no sorting and no grouping
	if ( !bFT && ( tQuery.m_sSortBy=="@weight desc" || tQuery.m_sSortBy.IsEmpty() ) && tQuery.m_sGroupBy.IsEmpty() && !tQuery.m_bFacet && !tQuery.m_bFacetHead )
		return tQuery.m_iLimit+tQuery.m_iOffset;

	return -1;
}
| 33,145
|
C++
|
.cpp
| 1,019
| 29.526006
| 244
| 0.665362
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,925
|
columnarmisc.cpp
|
manticoresoftware_manticoresearch/src/columnarmisc.cpp
|
//
// Copyright (c) 2021-2024, Manticore Software LTD (https://manticoresearch.com)
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "columnarmisc.h"
#include "schema/schema.h"
#include "attribute.h"
/// build one value iterator per columnar attribute of the given schema.
/// Returns an empty vector when there's no columnar storage at all.
CSphVector<ScopedTypedIterator_t> CreateAllColumnarIterators ( const columnar::Columnar_i * pColumnar, const ISphSchema & tSchema )
{
	CSphVector<ScopedTypedIterator_t> dRes;
	if ( !pColumnar )
		return dRes;

	int iAttrs = tSchema.GetAttrsCount();
	for ( int iAttr = 0; iAttr < iAttrs; iAttr++ )
	{
		const CSphColumnInfo & tCol = tSchema.GetAttr(iAttr);
		if ( !tCol.IsColumnar() )
			continue;

		std::string sError;
		dRes.Add ( { CreateColumnarIterator ( pColumnar, tCol.m_sName.cstr(), sError ), tCol.m_eAttrType } );
		assert ( dRes.Last().first );
	}

	return dRes;
}
/// copy one attribute value for a given row from a columnar iterator into a
/// columnar builder. Returns the scalar value for plain types, 0 for blob types.
/// dTmp is a caller-provided scratch buffer (reused to avoid per-call allocations).
SphAttr_t SetColumnarAttr ( int iAttr, ESphAttr eType, columnar::Builder_i * pBuilder, std::unique_ptr<columnar::Iterator_i> & pIterator, RowID_t tRowID, CSphVector<int64_t> & dTmp )
{
	switch ( eType )
	{
	case SPH_ATTR_UINT32SET:
	case SPH_ATTR_INT64SET:
	case SPH_ATTR_FLOAT_VECTOR:
		{
			const BYTE * pResult = nullptr;
			int iBytes = pIterator->Get ( tRowID, pResult );
			// 32-bit element types: uint32 MVAs and float vectors
			bool b32Bits = eType==SPH_ATTR_UINT32SET || eType==SPH_ATTR_FLOAT_VECTOR;
			int iValues = iBytes / ( b32Bits ? sizeof(DWORD) : sizeof(int64_t) );
			if ( b32Bits )
			{
				// need a 64-bit array as input. so we need to convert our 32-bit array to 64-bit entries
				dTmp.Resize(iValues);
				ARRAY_FOREACH ( i, dTmp )
					dTmp[i] = ((DWORD*)pResult)[i];

				pBuilder->SetAttr ( iAttr, dTmp.Begin(), iValues );
			}
			else
				pBuilder->SetAttr ( iAttr, (const int64_t*)pResult, iValues );
		}
		break;

	case SPH_ATTR_STRING:
		{
			const BYTE * pResult = nullptr;
			int iBytes = pIterator->Get ( tRowID, pResult);
			pBuilder->SetAttr ( iAttr, (const uint8_t*)pResult, iBytes );
		}
		break;

	default:
		{
			// plain scalar attribute; its value is also returned to the caller
			int64_t iValue = pIterator->Get(tRowID);
			pBuilder->SetAttr ( iAttr, iValue );
			return iValue;
		}
	}

	return 0;
}
/// write a type-appropriate default (empty) value for one attribute into a
/// columnar builder: empty 64-bit array for MVAs, empty blob for strings,
/// plain zero for everything else
void SetDefaultColumnarAttr ( int iAttr, ESphAttr eType, columnar::Builder_i * pBuilder )
{
	if ( eType==SPH_ATTR_UINT32SET || eType==SPH_ATTR_INT64SET )
		pBuilder->SetAttr ( iAttr, (const int64_t *)nullptr, 0 );
	else if ( eType==SPH_ATTR_STRING )
		pBuilder->SetAttr ( iAttr, (const uint8_t *)nullptr, 0 );
	else
		pBuilder->SetAttr ( iAttr, 0 );
}
/// describe a single attribute that may live either in row storage or in
/// columnar storage: remember its type and either the columnar id or the row locator
PlainOrColumnar_t::PlainOrColumnar_t ( const CSphColumnInfo & tAttr, int iColumnar )
{
	m_eType = tAttr.m_eAttrType;
	if ( tAttr.IsColumnar() )
		m_iColumnarId = iColumnar;
	else
		m_tLocator = tAttr.m_tLocator;
}
/// fetch a scalar value: columnar attrs go through their iterator,
/// plain attrs are read right off the row via the locator
SphAttr_t PlainOrColumnar_t::Get ( RowID_t tRowID, const CSphRowitem * pRow, CSphVector<ScopedTypedIterator_t> & dIterators ) const
{
	return ( m_iColumnarId>=0 )
		? dIterators[m_iColumnarId].first->Get(tRowID)
		: sphGetRowAttr ( pRow, m_tLocator );
}
/// fetch a blob value (string/MVA): sets pData to the payload and returns its
/// length in bytes; columnar attrs use their iterator, plain attrs the blob pool
int PlainOrColumnar_t::Get ( RowID_t tRowID, const CSphRowitem * pRow, const BYTE * pPool, CSphVector<ScopedTypedIterator_t> & dIterators, const uint8_t * & pData ) const
{
	if ( m_iColumnarId>=0 )
		return dIterators[m_iColumnarId].first->Get ( tRowID, pData );

	int iLen = 0;
	pData = sphGetBlobAttr ( pRow, m_tLocator, pPool, iLen );
	return iLen;
}
| 3,431
|
C++
|
.cpp
| 107
| 29.654206
| 182
| 0.716016
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,926
|
indexer.cpp
|
manticoresoftware_manticoresearch/src/indexer.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "sphinxint.h"
#include "fileutils.h"
#include "sphinxutils.h"
#include "sphinxstem.h"
#include "sphinxplugin.h"
#include "attribute.h"
#include "cjkpreprocessor.h"
#include "icu.h"
#include "jieba.h"
#include <config_indexer.h>
#include "indexing_sources/source_sql.h"
#include "indexfiles.h"
#include "tokenizer/charset_definition_parser.h"
#include "tokenizer/tokenizer.h"
#include "secondarylib.h"
#include "knnlib.h"
#include <sys/stat.h>
#include <sys/types.h>
#include <ctype.h>
#include <errno.h>
#include <signal.h>
#if _WIN32
#define popen _popen
#define RMODE "rb"
#include <tlhelp32.h>
#else
#include <unistd.h>
#define RMODE "r"
#endif
/////////////////////////////////////////////////////////////////////////////
// file-scope indexer settings, populated from the command line and config
static bool g_bQuiet = false;			// suppress all output
static bool g_bProgress = true;			// show per-phase progress
static bool g_bPrintQueries = false;	// echo SQL queries being run
static bool g_bPrintRTQueries = false;
static bool g_bKeepAttrs = false;		// reuse attributes from an existing index
static CSphString g_sKeepAttrsPath;
static CSphString g_sDumpRtIndex;
static StrVec_t g_dKeepAttrs;
static const char * g_sBuildStops = NULL;	// output path for --buildstops mode
static int g_iTopStops = 100;				// how many top stopwords to emit
static bool g_bRotate = false;				// send HUP to searchd after indexing
static bool g_bRotateEach = false;			// rotate after each index rather than at the end
static bool g_bBuildFreqs = false;			// also emit word frequencies in --buildstops
static bool g_bSendHUP = true;
// buffer sizes (bytes)
static int g_iMemLimit = 128*1024*1024;
static int g_iMaxXmlpipe2Field = 2*1024*1024;
static int g_iWriteBuffer = 1024*1024;
static int g_iMaxFileFieldBuffer = 8*1024*1024;
static bool g_bIgnoreNonPlain = false;
static ESphOnFileFieldError g_eOnFileFieldError = FFE_IGNORE_FIELD;
static CSphString g_sBannerVersion { szMANTICORE_NAME };
#if _WIN32
static char g_sMinidump[256];
#endif
#define ROTATE_MIN_INTERVAL 100000 // rotate interval 100 ms
/////////////////////////////////////////////////////////////////////////////
/// single entry of the move-to-front hash below: key, chained next pointer,
/// owning bucket index, and the payload value
template < typename T > struct CSphMTFHashEntry
{
	CSphString				m_sKey;
	CSphMTFHashEntry<T> *	m_pNext;
	int						m_iSlot;	// bucket index, needed by FindNext() iteration
	T						m_tValue;
};
/// fixed-bucket-count chained hash with move-to-front on access:
/// frequently-hit keys bubble to the head of their chain, which speeds up
/// the heavily-skewed lookups of stopword counting
template < typename T, int SIZE, class HASHFUNC > class CSphMTFHash
{
public:
	/// ctor
	CSphMTFHash ()
	{
		m_pData = new CSphMTFHashEntry<T> * [ SIZE ];
		for ( int i=0; i<SIZE; i++ )
			m_pData[i] = NULL;
	}

	/// dtor; frees all chained entries and the bucket array
	~CSphMTFHash ()
	{
		for ( int i=0; i<SIZE; i++ )
		{
			CSphMTFHashEntry<T> * pHead = m_pData[i];
			while ( pHead )
			{
				CSphMTFHashEntry<T> * pNext = pHead->m_pNext;
				SafeDelete ( pHead );
				pHead = pNext;
			}
		}
		SafeDeleteArray ( m_pData );
	}

	/// add record to hash
	/// OPTIMIZE: should pass T not by reference for simple types
	/// if iKeyLen is 0, sKey is treated as a NUL-terminated string;
	/// returns a reference to the (possibly just-inserted) value
	T & Add ( const char * sKey, int iKeyLen, T & tValue )
	{
		DWORD uHash = HASHFUNC::Hash ( sKey ) % SIZE;

		// find matching entry
		CSphMTFHashEntry<T> * pEntry = m_pData [ uHash ];
		CSphMTFHashEntry<T> * pPrev = NULL;
		while ( pEntry && strcmp ( sKey, pEntry->m_sKey.cstr() ) )
		{
			pPrev = pEntry;
			pEntry = pEntry->m_pNext;
		}

		if ( !pEntry )
		{
			// not found, add it, but don't MTF
			pEntry = new CSphMTFHashEntry<T>;
			if ( iKeyLen )
				pEntry->m_sKey.SetBinary ( sKey, iKeyLen );
			else
				pEntry->m_sKey = sKey;
			pEntry->m_pNext = NULL;
			pEntry->m_iSlot = (int)uHash;
			pEntry->m_tValue = tValue;

			// link to the chain tail (keeps existing MTF order intact)
			if ( !pPrev )
				m_pData [ uHash ] = pEntry;
			else
				pPrev->m_pNext = pEntry;
		} else
		{
			// MTF on access
			if ( pPrev )
			{
				pPrev->m_pNext = pEntry->m_pNext;
				pEntry->m_pNext = m_pData [ uHash ];
				m_pData [ uHash ] = pEntry;
			}
		}

		return pEntry->m_tValue;
	}

	/// find first non-empty entry
	const CSphMTFHashEntry<T> * FindFirst ()
	{
		for ( int i=0; i<SIZE; i++ )
			if ( m_pData[i] )
				return m_pData[i];
		return NULL;
	}

	/// find next non-empty entry
	const CSphMTFHashEntry<T> * FindNext ( const CSphMTFHashEntry<T> * pEntry )
	{
		assert ( pEntry );
		if ( pEntry->m_pNext )
			return pEntry->m_pNext;

		// chain exhausted; scan forward from the entry's own bucket
		for ( int i=1+pEntry->m_iSlot; i<SIZE; i++ )
			if ( m_pData[i] )
				return m_pData[i];
		return NULL;
	}

protected:
	CSphMTFHashEntry<T> **	m_pData;	// bucket array of chain heads
};
#define HASH_FOREACH(_it,_hash) \
for ( _it=_hash.FindFirst(); _it; _it=_hash.FindNext(_it) )
/////////////////////////////////////////////////////////////////////////////
/// word/count pair for the top-N stopword report;
/// m_sWord is a borrowed pointer into the hash entry, not owned
struct Word_t
{
	const char *	m_sWord;
	int				m_iCount;
};

/// order by count, so that RSort() yields most-frequent-first
inline bool operator < ( const Word_t & a, const Word_t & b)
{
	return a.m_iCount < b.m_iCount;
}
/// dictionary stub that, instead of producing word IDs, counts word occurrences
/// in an MTF hash; used by the --buildstops mode to collect candidate stopwords
class CSphStopwordBuilderDict final : public DictStub_c
{
protected:
	~CSphStopwordBuilderDict() final = default;
public:
	CSphStopwordBuilderDict () = default;
	// dump top iTop words (optionally with frequencies) to file sOutput
	void		Save ( const char * sOutput, int iTop, bool bFreqs );

public:
	SphWordID_t	GetWordID ( BYTE * pWord ) final;
	SphWordID_t	GetWordID ( const BYTE * pWord, int iLen, bool ) final;

protected:
	struct HashFunc_t
	{
		static inline DWORD Hash ( const char * sKey )
		{
			return sphCRC32 ( sKey );
		}
	};

protected:
	CSphMTFHash < int, 1048576, HashFunc_t >	m_hWords;	// word -> occurrence count

	// fake setttings
	CSphDictSettings			m_tSettings;
	CSphVector <CSphSavedFile>	m_dSWFileInfos;
	CSphVector <CSphSavedFile>	m_dWFFileInfos;
};
/// write the iTop most frequent words to sOutput, one per line
/// (with counts when bFreqs is set).
/// NOTE(review): a failed fopen() is silently ignored - presumably deliberate
/// best-effort behavior; confirm before adding error reporting.
void CSphStopwordBuilderDict::Save ( const char * sOutput, int iTop, bool bFreqs )
{
	FILE * fp = fopen ( sOutput, "w+" );
	if ( !fp )
		return;

	// collect (word, count) pairs from the hash
	CSphVector<Word_t> dTop;
	dTop.Reserve ( 1024 );

	const CSphMTFHashEntry<int> * it;
	HASH_FOREACH ( it, m_hWords )
	{
		Word_t t;
		t.m_sWord = it->m_sKey.cstr();
		t.m_iCount = it->m_tValue;
		dTop.Add ( t );
	}

	// descending by count, then emit the first iTop entries
	dTop.RSort ();

	ARRAY_FOREACH ( i, dTop )
	{
		if ( i>=iTop )
			break;
		if ( bFreqs )
			fprintf ( fp, "%s %d\n", dTop[i].m_sWord, dTop[i].m_iCount );
		else
			fprintf ( fp, "%s\n", dTop[i].m_sWord );
	}

	fclose ( fp );
}
/// count one occurrence of a NUL-terminated word; always returns 1
/// (a fixed non-zero id, since callers only check for zero = "stopword/skip")
SphWordID_t CSphStopwordBuilderDict::GetWordID ( BYTE * pWord )
{
	int iZero = 0;
	m_hWords.Add ( (const char *)pWord, 0, iZero )++;
	return 1;
}

/// count one occurrence of a length-delimited word; always returns 1
SphWordID_t CSphStopwordBuilderDict::GetWordID ( const BYTE * pWord, int iLen, bool )
{
	int iZero = 0;
	m_hWords.Add ( (const char *)pWord, iLen, iZero )++;
	return 1;
}
/////////////////////////////////////////////////////////////////////////////
/// console reporter for indexing progress: prints one line per phase,
/// using '\r' for in-place updates and '\n' when a phase completes
struct ConsoleIndexProgress_t: public CSphIndexProgress
{
	void ShowImpl ( bool bPhaseEnd ) const final
	{
		// if in quiet mode, do not show anything at all
		// if in no-progress mode, only show phase ends
		if ( g_bQuiet || ( !g_bProgress && !bPhaseEnd ) )
			return;

		StringBuilder_c cOut;
		switch ( m_ePhase )
		{
		case PHASE_COLLECT:
			cOut.Sprintf ( "collected %l docs, %.1D MB", m_iDocuments, m_iBytes / 100000 );
			break;

		// Example: ( "%.4F", 999005 ) will output '99.9005'.
		//( "%.3D", (int64_t) -10000 ) will output '-10.000'
		case PHASE_SORT:
			cOut.Sprintf ( "sorted %.1D Mhits, %.1D%% done", m_iHits / 100000, PercentOf ( m_iHits, m_iHitsTotal ) );
			break;

		case PHASE_MERGE:
			cOut.Sprintf ( "merged %.1D Kwords", m_iWords / 100 );
			break;

		case PHASE_LOOKUP:
			cOut.Sprintf ( "creating lookup: %.1D Kdocs, %.1D%% done", m_iDocids / 100, PercentOf ( m_iDocids, m_iDocidsTotal ) );
			break;

		case PHASE_SI_BUILD:
			cOut.Sprintf ( "creating secondary index" );
			break;

		case PHASE_JSONSI_BUILD:
			cOut.Sprintf ( "creating json secondary index" );
			break;

		default:
			assert ( 0 && "internal error: unhandled progress phase" );
			cOut.Sprintf ( "(progress-phase-%d)", m_ePhase );
			break;
		}

		// '\r' keeps updating the same console line until the phase ends
		fprintf ( stdout, "%s%c", cOut.cstr(), bPhaseEnd ? '\n' : '\r' );
		fflush ( stdout );
	};
};
/////////////////////////////////////////////////////////////////////////////
/// parse multi-valued attr definition
/// parse multi-valued attr definition
/// fills tAttr from an sql_attr_multi config line; prints to stdout and
/// returns false on any syntax error
bool ParseMultiAttr ( const char * sBuf, CSphColumnInfo & tAttr, const char * sSourceName )
{
	// format is as follows:
	//
	// multi-valued-attr := ATTR-TYPE ATTR-NAME 'from' SOURCE-TYPE [;QUERY] [;RANGE-QUERY]
	// ATTR-TYPE := 'uint' | 'timestamp' | 'bigint'
	// SOURCE-TYPE := 'field' | 'query' | 'ranged-query'

	const char * sTok = NULL;
	int iTokLen = -1;

	// local helper macros; sBuf is the scan cursor, sTok/iTokLen the last token
#define LOC_ERR(_arg,_pos) \
	{ \
		if ( !*(_pos) ) \
			fprintf ( stdout, "ERROR: source '%s': unexpected end of line in sql_attr_multi.\n", sSourceName ); \
		else \
			fprintf ( stdout, "ERROR: source '%s': expected " _arg " in sql_attr_multi, got '%s'.\n", sSourceName, _pos ); \
		return false; \
	}
#define LOC_SPACE0() { while ( isspace(*sBuf) ) sBuf++; }
#define LOC_SPACE1() { if ( !isspace(*sBuf) ) LOC_ERR ( "token", sBuf ) ; LOC_SPACE0(); }
#define LOC_TOK() { sTok = sBuf; while ( sphIsAlpha(*sBuf) ) sBuf++; iTokLen = sBuf-sTok; }
#define LOC_TOKEQ(_arg) ( iTokLen==(int)strlen(_arg) && strncasecmp ( sTok, _arg, iTokLen )==0 )
#define LOC_TEXT() { if ( *sBuf!=';') LOC_ERR ( "';'", sBuf ); sTok = ++sBuf; while ( *sBuf && *sBuf!=';' ) sBuf++; iTokLen = sBuf-sTok; }

	// handle ATTR-TYPE
	LOC_SPACE0(); LOC_TOK();
	if ( LOC_TOKEQ("uint") )			tAttr.m_eAttrType = SPH_ATTR_UINT32SET;
	else if ( LOC_TOKEQ("timestamp") )	tAttr.m_eAttrType = SPH_ATTR_UINT32SET;
	else if ( LOC_TOKEQ("bigint") )		tAttr.m_eAttrType = SPH_ATTR_INT64SET;
	else								LOC_ERR ( "attr type ('uint' or 'timestamp' or 'bigint')", sTok );

	// handle ATTR-NAME
	LOC_SPACE1(); LOC_TOK ();
	if ( iTokLen )	tAttr.m_sName.SetBinary ( sTok, iTokLen );
	else			LOC_ERR ( "attr name", sTok );

	// handle 'from'
	LOC_SPACE1(); LOC_TOK();
	if ( !LOC_TOKEQ("from") )	LOC_ERR ( "'from' keyword", sTok );

	// handle SOURCE-TYPE
	LOC_SPACE1(); LOC_TOK(); LOC_SPACE0();
	if ( LOC_TOKEQ("field") )					tAttr.m_eSrc = SPH_ATTRSRC_FIELD;
	else if ( LOC_TOKEQ("query") )				tAttr.m_eSrc = SPH_ATTRSRC_QUERY;
	else if ( LOC_TOKEQ("ranged-query") )		tAttr.m_eSrc = SPH_ATTRSRC_RANGEDQUERY;
	else if ( LOC_TOKEQ("ranged-main-query") )	tAttr.m_eSrc = SPH_ATTRSRC_RANGEDMAINQUERY;
	else										LOC_ERR ( "value source type ('field', or 'query', or 'ranged-query', or 'ranged-main-query')", sTok );

	// 'field' sources need no queries - done
	if ( tAttr.m_eSrc==SPH_ATTRSRC_FIELD )	return true;

	// handle QUERY
	LOC_TEXT();
	if ( iTokLen )	tAttr.m_sQuery.SetBinary ( sTok, iTokLen );
	else			LOC_ERR ( "query", sTok );
	if ( tAttr.m_eSrc==SPH_ATTRSRC_QUERY || tAttr.m_eSrc==SPH_ATTRSRC_RANGEDMAINQUERY )	return true;

	// handle RANGE-QUERY
	LOC_TEXT();
	if ( iTokLen )	tAttr.m_sQueryRange.SetBinary ( sTok, iTokLen );
	else			LOC_ERR ( "range query", sTok );

#undef LOC_ERR
#undef LOC_SPACE0
#undef LOC_SPACE1
#undef LOC_TOK
#undef LOC_TOKEQ
#undef LOC_TEXT

	return true;
}
// config-reading helpers used by the Spawn*/SqlParamsConfigure functions below;
// hSource is the config section hash in every expansion context

// ensure key exists in hash; print error and return false otherwise
#define LOC_CHECK(_hash,_key,_msg,_add) \
	if (!( _hash.Exists ( _key ) )) \
	{ \
		fprintf ( stdout, "ERROR: key '%s' not found " _msg "\n", _key, _add ); \
		return false; \
	}

// get string
#define LOC_GETS(_arg,_key) \
	if ( hSource.Exists(_key) ) \
		_arg = hSource[_key].strval();

// get int
#define LOC_GETI(_arg,_key) \
	if ( hSource.Exists(_key) && hSource[_key].intval() ) \
		_arg = hSource[_key].intval();

// get int64_t
#define LOC_GETL(_arg,_key) \
	if ( hSource.Exists(_key) ) \
		_arg = hSource[_key].int64val();

// get bool
#define LOC_GETB(_arg,_key) \
	if ( hSource.Exists(_key) ) \
		_arg = ( hSource[_key].intval()!=0 );

// get array of strings
#define LOC_GETA(_arg,_key) \
	for ( CSphVariant * pVal = hSource(_key); pVal; pVal = pVal->m_pNext ) \
		_arg.Add ( pVal->cstr() );

// get time in seconds
#define LOC_GETTS( _arg, _key ) \
	if ( hSource.Exists(_key) && hSource[_key].intval() ) \
		_arg = hSource.GetMsTimeS(_key,0);

// get time in miliseconds
#define LOC_GETMS( _arg, _key ) \
	if ( hSource.Exists(_key) && hSource[_key].intval() ) \
		_arg = hSource.GetMsTimeMs(_key,0);
/// append attrs of a given type from a config value chain to tParams.
/// Supports the "name:bitcount" syntax for integer attributes; bIndexedAttr
/// marks the attr as a full-text-indexed field as well (sql_field_string).
void SqlAttrsConfigure ( CSphSourceParams_SQL & tParams, const CSphVariant * pHead,
	ESphAttr eAttrType, const char * sSourceName, bool bIndexedAttr = false )
{
	for ( const CSphVariant * pCur = pHead; pCur; pCur= pCur->m_pNext )
	{
		CSphColumnInfo tCol ( pCur->cstr(), eAttrType );
		// optional ":bitcount" suffix; stripped from the name in-place
		char * pColon = strchr ( const_cast<char*> ( tCol.m_sName.cstr() ), ':' );
		if ( pColon )
		{
			*pColon = '\0';

			if ( eAttrType==SPH_ATTR_INTEGER )
			{
				int iBits = strtol ( pColon+1, NULL, 10 );
				if ( iBits<=0 || iBits>ROWITEM_BITS )
				{
					fprintf ( stdout, "WARNING: source '%s': attribute '%s': invalid bitcount=%d (bitcount ignored)\n",
						sSourceName, tCol.m_sName.cstr(), iBits );
					iBits = -1;
				}
				tCol.m_tLocator.m_iBitCount = iBits;

			} else
			{
				fprintf ( stdout, "WARNING: source '%s': attribute '%s': bitcount is only supported for integer types\n",
					sSourceName, tCol.m_sName.cstr() );
			}
		}
		tParams.m_dAttrs.Add ( tCol );
		if ( bIndexedAttr )
			tParams.m_dAttrs.Last().m_bIndexed = true;
	}
}
#if WITH_ZLIB
/// register each column listed in the config chain for on-the-fly unpacking
/// with the given format (zlib builds)
bool ConfigureUnpack ( CSphVariant * pHead, ESphUnpackFormat eFormat, CSphSourceParams_SQL & tParams, const char * )
{
	for ( CSphVariant * pVal = pHead; pVal; pVal = pVal->m_pNext )
	{
		CSphUnpackInfo & tUnpack = tParams.m_dUnpack.Add();
		tUnpack.m_sName = CSphString ( pVal->cstr() );
		tUnpack.m_eFormat = eFormat;
	}
	return true;
}
#else
/// non-zlib builds: unpack directives are a hard config error
bool ConfigureUnpack ( CSphVariant * pHead, ESphUnpackFormat, CSphSourceParams_SQL &, const char * sSourceName )
{
	if ( pHead )
	{
		fprintf ( stdout, "ERROR: source '%s': unpack is not supported, rebuild with zlib\n", sSourceName );
		return false;
	}
	return true;
}
#endif // WITH_ZLIB
/// parse an sql_joined_field config line into pField.
/// Grammar: NAME 'from' ('query'|'payload-query'|'ranged-query'|'ranged-main-query') ';' QUERY [';' RANGE-QUERY]
/// Prints to stdout and returns false on any syntax error.
bool ParseJoinedField ( const char * sBuf, CSphJoinedField * pField, const char * sSourceName )
{
	// sanity checks
	assert ( pField );
	if ( !sBuf || !sBuf[0] )
	{
		fprintf ( stdout, "ERROR: source '%s': sql_joined_field must not be empty.\n", sSourceName );
		return false;
	}

#define LOC_ERR(_exp) \
	{ \
		fprintf ( stdout, "ERROR: source '%s': expected " _exp " in sql_joined_field, got '%s'.\n", sSourceName, sBuf ); \
		return false; \
	}
#define LOC_TEXT()			{ if ( *sBuf!=';') LOC_ERR ( "';'" ); sTmp = ++sBuf; while ( *sBuf && *sBuf!=';' ) sBuf++; iTokLen = sBuf-sTmp; }

	// parse field name
	while ( isspace(*sBuf) )
		sBuf++;
	const char * sName = sBuf;
	while ( sphIsAlpha(*sBuf) )
		sBuf++;
	if ( sBuf==sName )
		LOC_ERR ( "field name" );
	pField->m_sName.SetBinary ( sName, sBuf-sName );

	if ( !isspace(*sBuf) )
		LOC_ERR ( "space" );
	while ( isspace(*sBuf) )
		sBuf++;

	// parse 'from'
	if ( strncasecmp ( sBuf, "from", 4 ) )
		LOC_ERR ( "'from'" );
	sBuf += 4;

	if ( !isspace(*sBuf) )
		LOC_ERR ( "space" );
	while ( isspace(*sBuf) )
		sBuf++;

	bool bGotRanged = false;
	pField->m_bPayload = false;
	pField->m_bRangedMain = false;

	// parse 'query'
	// NOTE: longer keywords are checked first, since 'query' is a prefix of none
	// of them but 'payload-query' etc. must not fall through to plain 'query'
	if ( strncasecmp ( sBuf, "payload-query", 13 )==0 )
	{
		pField->m_bPayload = true;
		sBuf += 13;

	} else if ( strncasecmp ( sBuf, "query", 5 )==0 )
	{
		sBuf += 5;

	} else if ( strncasecmp ( sBuf, "ranged-query", 12 )==0 )
	{
		bGotRanged = true;
		sBuf += 12;

	} else if ( strncasecmp ( sBuf, "ranged-main-query", 17 )==0 )
	{
		pField->m_bRangedMain = true;
		sBuf += 17;

	} else
		LOC_ERR ( "'query'" );

	// parse ';'
	while ( isspace(*sBuf) && *sBuf!=';' )
		sBuf++;
	if ( *sBuf!=';' )
		LOC_ERR ( "';'" );

	// handle QUERY
	const char * sTmp = sBuf;
	int iTokLen = 0;
	LOC_TEXT();
	if ( iTokLen )
		pField->m_sQuery.SetBinary ( sTmp, iTokLen );
	else
		LOC_ERR ( "query" );

	if ( !bGotRanged )
		return true;

	// handle RANGE-QUERY
	LOC_TEXT();
	if ( iTokLen )
		pField->m_sRanged.SetBinary ( sTmp, iTokLen );
	else
		LOC_ERR ( "range query" );

#undef LOC_ERR
#undef LOC_TEXT

	return true;
}
/// fill the common SQL-source parameters (connection, queries, attrs, joined
/// fields, unpack settings) from a config section; returns false on config errors
bool SqlParamsConfigure ( CSphSourceParams_SQL & tParams, const CSphConfigSection & hSource, const char * sSourceName )
{
	if ( !hSource.Exists("odbc_dsn") ) // in case of odbc source, the host, user, pass and db are not mandatory, since they may be already defined in dsn string.
	{
		LOC_CHECK ( hSource, "sql_host", "in source '%s'", sSourceName );
		LOC_CHECK ( hSource, "sql_user", "in source '%s'", sSourceName );
		LOC_CHECK ( hSource, "sql_pass", "in source '%s'", sSourceName );
		LOC_CHECK ( hSource, "sql_db", "in source '%s'", sSourceName );
	}
	LOC_CHECK ( hSource, "sql_query", "in source '%s'", sSourceName );

	// connection and query settings
	LOC_GETS ( tParams.m_sHost,				"sql_host" );
	LOC_GETS ( tParams.m_sUser,				"sql_user" );
	LOC_GETS ( tParams.m_sPass,				"sql_pass" );
	LOC_GETS ( tParams.m_sDB,				"sql_db" );
	LOC_GETI ( tParams.m_uPort,				"sql_port" );

	LOC_GETS ( tParams.m_sQuery,			"sql_query" );
	LOC_GETA ( tParams.m_dQueryPre,			"sql_query_pre" );
	LOC_GETA ( tParams.m_dQueryPreAll,		"sql_query_pre_all" );
	LOC_GETA ( tParams.m_dQueryPost,		"sql_query_post" );
	LOC_GETS ( tParams.m_sQueryRange,		"sql_query_range" );
	LOC_GETA ( tParams.m_dQueryPostIndex,	"sql_query_post_index" );
	LOC_GETL ( tParams.m_iRangeStep,		"sql_range_step" );
	LOC_GETS ( tParams.m_sQueryKilllist,	"sql_query_killlist" );

	LOC_GETS ( tParams.m_sHookConnect,		"hook_connect" );
	LOC_GETS ( tParams.m_sHookQueryRange,	"hook_query_range" );
	LOC_GETS ( tParams.m_sHookPostIndex,	"hook_post_index" );

	LOC_GETMS ( tParams.m_iRangedThrottleMs,	"sql_ranged_throttle" );

	// per-type attribute lists
	SqlAttrsConfigure ( tParams,	hSource("sql_attr_uint"),		SPH_ATTR_INTEGER,	sSourceName );
	SqlAttrsConfigure ( tParams,	hSource("sql_attr_timestamp"),	SPH_ATTR_TIMESTAMP,	sSourceName );
	SqlAttrsConfigure ( tParams,	hSource("sql_attr_bool"),		SPH_ATTR_BOOL,		sSourceName );
	SqlAttrsConfigure ( tParams,	hSource("sql_attr_float"),		SPH_ATTR_FLOAT,		sSourceName );
	SqlAttrsConfigure ( tParams,	hSource("sql_attr_bigint"),		SPH_ATTR_BIGINT,	sSourceName );
	SqlAttrsConfigure ( tParams,	hSource("sql_attr_string"),		SPH_ATTR_STRING,	sSourceName );
	SqlAttrsConfigure ( tParams,	hSource("sql_attr_json"),		SPH_ATTR_JSON,		sSourceName );
	SqlAttrsConfigure ( tParams,	hSource("sql_field_string"),	SPH_ATTR_STRING,	sSourceName, true );

	LOC_GETA ( tParams.m_dFileFields,			"sql_file_field" );

	tParams.m_iMaxFileBufferSize = g_iMaxFileFieldBuffer;
	tParams.m_iRefRangeStep = tParams.m_iRangeStep;
	tParams.m_eOnFileFieldError = g_eOnFileFieldError;

	// unpack
	if ( !ConfigureUnpack ( hSource("unpack_zlib"), SPH_UNPACK_ZLIB, tParams, sSourceName ) )
		return false;

	if ( !ConfigureUnpack ( hSource("unpack_mysqlcompress"), SPH_UNPACK_MYSQL_COMPRESS, tParams, sSourceName ) )
		return false;

	tParams.m_uUnpackMemoryLimit = hSource.GetSize ( "unpack_mysqlcompress_maxsize", 16777216 );

	// parse multi-attrs
	for ( CSphVariant * pVal = hSource("sql_attr_multi"); pVal; pVal = pVal->m_pNext )
	{
		CSphColumnInfo tAttr;
		if ( !ParseMultiAttr ( pVal->cstr(), tAttr, sSourceName ) )
			return false;
		tParams.m_dAttrs.Add ( tAttr );
	}

	// parse joined fields
	for ( CSphVariant * pVal = hSource("sql_joined_field"); pVal; pVal = pVal->m_pNext )
		if ( !ParseJoinedField ( pVal->cstr(), &tParams.m_dJoinedFields.Add(), sSourceName ) )
			return false;

	// make sure attr names are unique
	ARRAY_FOREACH ( i, tParams.m_dAttrs )
		for ( int j = i + 1; j < tParams.m_dAttrs.GetLength(); j++ )
		{
			const CSphString & sName = tParams.m_dAttrs[i].m_sName;
			if ( strcasecmp ( sName.cstr(), tParams.m_dAttrs[j].m_sName.cstr() )==0 )
			{
				fprintf ( stdout, "ERROR: duplicate attribute name: %s\n", sName.cstr() );
				return false;
			}
		}

	// additional checks
	if ( tParams.m_iRangedThrottleMs<0 )
	{
		fprintf ( stdout, "WARNING: sql_ranged_throttle must not be negative; throttling disabled\n" );
		tParams.m_iRangedThrottleMs = 0;
	}

	// debug printer
	if ( g_bPrintQueries )
		tParams.m_bPrintQueries = true;

	tParams.m_bPrintRTQueries = g_bPrintRTQueries;
	tParams.m_sDumpRTIndex = g_sDumpRtIndex;

	return true;
}
#if WITH_POSTGRESQL
#include "indexing_sources/source_pgsql.h"
/// build a PostgreSQL data source from its config section;
/// returns NULL (with an error already printed) on bad config
CSphSource * SpawnSourcePgSQL ( const CSphConfigSection & hSource, const char * sSourceName )
{
	assert ( hSource["type"]=="pgsql" );

	CSphSourceParams_PgSQL tParams;
	if ( !SqlParamsConfigure ( tParams, hSource, sSourceName ) )
		return NULL;

	LOC_GETS ( tParams.m_sClientEncoding,	"sql_client_encoding" );

	return CreateSourcePGSQL ( tParams, sSourceName );
}
#endif // WITH_POSTGRESQL
#if WITH_MYSQL
#include "indexing_sources/source_mysql.h"
/// build a MySQL data source from its config section;
/// returns NULL (with an error already printed) on bad config
CSphSource * SpawnSourceMySQL ( const CSphConfigSection & hSource, const char * sSourceName )
{
	assert ( hSource["type"]=="mysql" );

	CSphSourceParams_MySQL tParams;
	if ( !SqlParamsConfigure ( tParams, hSource, sSourceName ) )
		return NULL;

	// MySQL-specific bits: unix socket, connect flags, SSL material
	LOC_GETS ( tParams.m_sUsock,			"sql_sock" );
	LOC_GETI ( tParams.m_iFlags,			"mysql_connect_flags" );
	LOC_GETS ( tParams.m_sSslKey,			"mysql_ssl_key" );
	LOC_GETS ( tParams.m_sSslCert,			"mysql_ssl_cert" );
	LOC_GETS ( tParams.m_sSslCA,			"mysql_ssl_ca" );

	return CreateSourceMysql ( tParams, sSourceName );
}
#endif // WITH_MYSQL
#if WITH_ODBC
#include "indexing_sources/source_odbc.h"
/// Create a generic ODBC data source from its config section.
CSphSource * SpawnSourceODBC ( const CSphConfigSection & hSource, const char * sSourceName )
{
	assert ( hSource["type"]=="odbc" );
	CSphSourceParams_ODBC tSrcParams;
	if ( !SqlParamsConfigure ( tSrcParams, hSource, sSourceName ) )
		return nullptr;
	// ODBC-specific knobs on top of the common SQL source settings
	LOC_GETS ( tSrcParams.m_sOdbcDSN, "odbc_dsn" );
	LOC_GETS ( tSrcParams.m_sColBuffers, "sql_column_buffers" );
	return CreateSourceODBC ( tSrcParams, sSourceName );
}
/// Create an MS SQL data source (ODBC-based) from its config section.
CSphSource * SpawnSourceMSSQL ( const CSphConfigSection & hSource, const char * sSourceName )
{
	assert ( hSource["type"]=="mssql" );
	CSphSourceParams_ODBC tSrcParams;
	if ( !SqlParamsConfigure ( tSrcParams, hSource, sSourceName ) )
		return nullptr;
	// mssql-specific: Windows integrated authentication
	LOC_GETB ( tSrcParams.m_bWinAuth, "mssql_winauth" );
	LOC_GETS ( tSrcParams.m_sColBuffers, "sql_column_buffers" );
	LOC_GETS ( tSrcParams.m_sOdbcDSN, "odbc_dsn" ); // a shortcut, may be used instead of other specific combination
	return CreateSourceMSSQL ( tSrcParams, sSourceName );
}
#endif // WITH_ODBC
#if WITH_EXPAT
#include "indexing_sources/source_xmlpipe2.h"
/// Create an xmlpipe2 data source: spawns 'xmlpipe_command' via popen() and
/// feeds its stdout to the XML parser.
/// Returns nullptr (with an error printed to stdout) on any failure.
CSphSource * SpawnSourceXMLPipe ( const CSphConfigSection & hSource, const char * sSourceName )
{
	assert ( hSource["type"]=="xmlpipe2" );
	if ( !( hSource.Exists ( "xmlpipe_command" ) ))
	{
		fprintf ( stdout, "ERROR: key 'xmlpipe_command' not found in source '%s'\n", sSourceName );
		return nullptr;
	}
	FILE * pPipe = popen ( hSource [ "xmlpipe_command" ].cstr(), RMODE );
	if ( !pPipe )
	{
		// FIX: added the trailing newline that every other error message here has
		fprintf ( stdout, "ERROR: xmlpipe: failed to popen '%s'\n", hSource [ "xmlpipe_command" ].cstr() );
		return nullptr;
	}
	CSphString sError;
	CSphSource * pResult = sphCreateSourceXmlpipe2 ( &hSource, pPipe, sSourceName, g_iMaxXmlpipe2Field, sError );
	if ( !pResult )
	{
		// FIX: added trailing newline for consistency
		// NOTE(review): pPipe is presumably owned (and pclose'd) by the source even
		// on failure — confirm sphCreateSourceXmlpipe2's ownership contract
		fprintf ( stdout, "ERROR: xmlpipe: %s\n", sError.cstr() );
	}
	return pResult;
}
#endif // WITH_EXPAT
#include "indexing_sources/source_svpipe.h"
/// Create a tsvpipe data source: spawns 'tsvpipe_command' via popen() and
/// parses its stdout as tab-separated rows.
/// Returns nullptr (with an error printed to stdout) on failure.
CSphSource * SpawnSourceTSVPipe ( const CSphConfigSection & hSource, const char * sSourceName )
{
	assert ( hSource["type"]=="tsvpipe" );
	if ( !( hSource.Exists ( "tsvpipe_command" ) ))
	{
		fprintf ( stdout, "ERROR: key 'tsvpipe_command' not found in source '%s'\n", sSourceName );
		return nullptr;
	}
	FILE * pPipe = popen ( hSource [ "tsvpipe_command" ].cstr(), RMODE );
	if ( !pPipe )
	{
		// FIX: added the trailing newline that every other error message here has
		fprintf ( stdout, "ERROR: tsvpipe: failed to popen '%s'\n", hSource [ "tsvpipe_command" ].cstr() );
		return nullptr;
	}
	return sphCreateSourceTSVpipe ( &hSource, pPipe, sSourceName );
}
/// Create a csvpipe data source: spawns 'csvpipe_command' via popen() and
/// parses its stdout as comma-separated rows.
/// Returns nullptr (with an error printed to stdout) on failure.
CSphSource * SpawnSourceCSVPipe ( const CSphConfigSection & hSource, const char * sSourceName )
{
	assert ( hSource["type"]=="csvpipe" );
	if ( !( hSource.Exists ( "csvpipe_command" ) ))
	{
		fprintf ( stdout, "ERROR: key 'csvpipe_command' not found in source '%s'\n", sSourceName );
		return nullptr;
	}
	FILE * pPipe = popen ( hSource [ "csvpipe_command" ].cstr(), RMODE );
	if ( !pPipe )
	{
		// FIX: added the trailing newline that every other error message here has
		fprintf ( stdout, "ERROR: csvpipe: failed to popen '%s'\n", hSource [ "csvpipe_command" ].cstr() );
		return nullptr;
	}
	return sphCreateSourceCSVpipe ( &hSource, pPipe, sSourceName );
}
/// Dispatch on the source's 'type' key and build the matching data source.
/// Unknown or missing types are reported to stdout and yield nullptr.
CSphSource * SpawnSource ( const CSphConfigSection & hSource, const char * sSourceName )
{
	// every source section must declare its type explicitly
	if ( !hSource.Exists ( "type" ) )
	{
		fprintf ( stdout, "ERROR: source '%s': type not found; skipping.\n", sSourceName );
		return nullptr;
	}
	const CSphString & sType = hSource["type"].strval();
#if WITH_POSTGRESQL
	if ( sType=="pgsql" )
		return SpawnSourcePgSQL ( hSource, sSourceName );
#endif
#if WITH_MYSQL
	if ( sType=="mysql" )
		return SpawnSourceMySQL ( hSource, sSourceName );
#endif
#if WITH_ODBC
	if ( sType=="odbc" )
		return SpawnSourceODBC ( hSource, sSourceName );
	if ( sType=="mssql" )
		return SpawnSourceMSSQL ( hSource, sSourceName );
#endif
#if WITH_EXPAT
	if ( sType=="xmlpipe2" )
		return SpawnSourceXMLPipe ( hSource, sSourceName );
#endif
	if ( sType=="tsvpipe" )
		return SpawnSourceTSVPipe ( hSource, sSourceName );
	if ( sType=="csvpipe" )
		return SpawnSourceCSVPipe ( hSource, sSourceName );
	fprintf ( stdout, "ERROR: source '%s': unknown type '%s'; skipping.\n", sSourceName,
		sType.cstr() );
	return nullptr;
}
#undef LOC_CHECK
#undef LOC_GETS
#undef LOC_GETI
#undef LOC_GETL
#undef LOC_GETA
//////////////////////////////////////////////////////////////////////////
// INDEXING
//////////////////////////////////////////////////////////////////////////
/// Build one plain table described by config section hIndex, or (with --buildstops)
/// collect stopwords from its sources instead.
/// @param hIndex      per-table config section; must contain 'path' for plain tables
/// @param szIndexName table name (used in paths and all log messages)
/// @param hSources    all configured 'source' sections; hIndex's 'source' keys select from them
/// @param fpDumpRows  optional --dump-rows output file, may be NULL
/// @return true on success; for non-plain tables, returns g_bIgnoreNonPlain
bool DoIndex ( const CSphConfigSection & hIndex, const char * szIndexName, const CSphConfigType & hSources, FILE * fpDumpRows )
{
	// check index type
	bool bPlain = true;
	if ( hIndex("type") )
	{
		const CSphString & sType = hIndex["type"].strval();
		bPlain = ( sType=="plain" );
		if ( sType!="plain" && sType!="distributed" && sType!="rt" && sType!="template" && sType!="percolate" )
		{
			fprintf ( stdout, "ERROR: table '%s': unknown type '%s'; fix your config file.\n", szIndexName, sType.cstr() );
			fflush ( stdout );
			return false;
		}
	}
	// indexer only builds plain tables; all other types are handled by searchd
	if ( !bPlain )
	{
		if ( !g_bQuiet && !g_bIgnoreNonPlain )
		{
			fprintf ( stdout, "WARNING: skipping non-plain table '%s'...\n", szIndexName );
			fflush ( stdout );
		}
		return g_bIgnoreNonPlain;
	}
	// progress bar
	if ( !g_bQuiet )
	{
		fprintf ( stdout, "indexing table '%s'...\n", szIndexName );
		fflush ( stdout );
	}
	// check config
	if ( !hIndex("path") )
	{
		fprintf ( stdout, "ERROR: table '%s': key 'path' not found.\n", szIndexName );
		return false;
	}
	// configure early
	// (need bigram settings to spawn a proper indexing tokenizer)
	CSphIndexSettings tSettings;
	{
		CSphString sWarning, sError;
		if ( !tSettings.Setup ( hIndex, szIndexName, sWarning, sError ) )
			sphDie ( "table '%s': %s", szIndexName, sError.cstr() );
		if ( !sWarning.IsEmpty() )
			fprintf ( stdout, "WARNING: table '%s': %s\n", szIndexName, sWarning.cstr() );
	}
	///////////////////
	// spawn tokenizer
	///////////////////
	CSphTokenizerSettings tTokSettings;
	{
		CSphString sWarning;
		tTokSettings.Setup ( hIndex, sWarning );
		if ( !sWarning.IsEmpty() )
			fprintf ( stdout, "WARNING: table '%s': %s\n", szIndexName, sWarning.cstr() );
	}
	// CJK handling may adjust/validate tokenizer settings; warnings only, never fatal
	{
		CSphString sWarning;
		if ( !CheckTokenizerCJK ( tSettings, tTokSettings, sWarning ) )
			fprintf ( stdout, "WARNING: table '%s': %s\n", szIndexName, sWarning.cstr() );
	}
	CSphDictSettings tDictSettings;
	{
		CSphString sWarning;
		tDictSettings.Setup ( hIndex, nullptr, sWarning );
		if ( !sWarning.IsEmpty() )
			fprintf ( stdout, "WARNING: table '%s': %s\n", szIndexName, sWarning.cstr() );
	}
	StrVec_t dWarnings;
	CSphString sError;
	TokenizerRefPtr_c pTokenizer = Tokenizer::Create ( tTokSettings, nullptr, nullptr, dWarnings, sError );
	if ( !pTokenizer )
		sphDie ( "table '%s': %s", szIndexName, sError.cstr() );
	// enable sentence indexing on tokenizer
	// (not in Create() because search time tokenizer does not care)
	bool bIndexSP = ( hIndex.GetInt ( "index_sp" )!=0 );
	if ( bIndexSP )
		if ( !pTokenizer->EnableSentenceIndexing ( sError ) )
			sphDie ( "table '%s': %s", szIndexName, sError.cstr() );
	if ( hIndex("index_zones") )
		if ( !pTokenizer->EnableZoneIndexing ( sError ) )
			sphDie ( "table '%s': %s", szIndexName, sError.cstr() );
	DictRefPtr_c pDict;
	// setup tokenization filters
	// NOTE: with --buildstops a plain stopword-builder dict is used instead (see below),
	// so the regular dictionary/filters are skipped entirely
	if ( !g_sBuildStops )
	{
		// plugin filter
		if ( !tSettings.m_sIndexTokenFilter.IsEmpty() )
		{
			Tokenizer::AddPluginFilterTo ( pTokenizer, tSettings.m_sIndexTokenFilter, sError );
			// need token_filter that just passes init phase in case stopwords or wordforms will be loaded
			if ( !sError.IsEmpty() )
				sphDie ( "table '%s': %s", szIndexName, sError.cstr() );
		}
		// multiforms filter
		pDict = tDictSettings.m_bWordDict
			? sphCreateDictionaryKeywords ( tDictSettings, nullptr, pTokenizer, szIndexName, false, tSettings.m_iSkiplistBlockSize, nullptr, sError )
			: sphCreateDictionaryCRC ( tDictSettings, nullptr, pTokenizer, szIndexName, false, tSettings.m_iSkiplistBlockSize, nullptr, sError );
		if ( !pDict )
			sphDie ( "table '%s': %s", szIndexName, sError.cstr() );
		MutableIndexSettings_c tMutableSettings;
		tMutableSettings.Load ( hIndex, false, nullptr );
		// index_exact_words only makes sense when tokens can differ from their exact form
		// (morphology, wordforms, or keyword expansion); warn and auto-correct otherwise
		bool bNeedExact = ( pDict->HasMorphology() || pDict->GetWordformsFileInfos().GetLength() || tMutableSettings.m_iExpandKeywords );
		if ( tSettings.m_bIndexExactWords && !bNeedExact )
		{
			tSettings.m_bIndexExactWords = false;
			fprintf ( stdout, "WARNING: table '%s': no morphology or wordforms, index_exact_words=1 has no effect, ignoring\n", szIndexName );
		}
		if ( !tSettings.m_bIndexExactWords && ForceExactWords ( tDictSettings.m_bWordDict, pDict->HasMorphology(), tSettings.RawMinPrefixLen(), tSettings.m_iMinInfixLen, pDict->GetSettings().m_sMorphFields.IsEmpty() ) )
		{
			tSettings.m_bIndexExactWords = true;
			fprintf ( stdout, "WARNING: table '%s': dict=keywords and prefixes and morphology enabled, forcing index_exact_words=1\n", szIndexName );
		}
		bool bExpandExact = ( tSettings.m_bIndexExactWords && ( tMutableSettings.m_iExpandKeywords & KWE_EXACT )==KWE_EXACT );
		if ( !pDict->GetSettings().m_sMorphFields.IsEmpty() && !bExpandExact )
			fprintf ( stdout, "WARNING: table '%s': morphology_skip_fields set, consider enable expand_keywords\n", szIndexName );
		Tokenizer::AddToMultiformFilterTo ( pTokenizer, pDict->GetMultiWordforms () );
		// bigram filter
		Tokenizer::AddBigramFilterTo ( pTokenizer, tSettings.m_eBigramIndex, tSettings.m_sBigramWords, sError );
		if ( !sError.IsEmpty() )
			sphDie ( "table '%s': %s", szIndexName, sError.cstr() );
		// aot filter
		if ( tSettings.m_uAotFilterMask )
			sphAotTransformFilter ( pTokenizer, pDict, tSettings.m_bIndexExactWords, tSettings.m_uAotFilterMask );
	}
	// regexp/ICU/Jieba field filters; a failed regexp setup only warns,
	// but ICU/Jieba setup failures are fatal
	std::unique_ptr<ISphFieldFilter> pFieldFilter;
	CSphFieldFilterSettings tFilterSettings;
	if ( tFilterSettings.Setup ( hIndex, sError ) )
		pFieldFilter = sphCreateRegexpFilter ( tFilterSettings, sError );
	if ( !sError.IsEmpty () )
		fprintf ( stdout, "WARNING: table '%s': %s\n", szIndexName, sError.cstr() );
	if ( !sphSpawnFilterICU ( pFieldFilter, tSettings, tTokSettings, szIndexName, sError ) )
		sphDie ( "%s", sError.cstr() );
	if ( !SpawnFilterJieba ( pFieldFilter, tSettings, tTokSettings, szIndexName, nullptr, sError ) )
		sphDie ( "%s", sError.cstr() );
	// boundary
	// in-place inversion settings; factors are clamped/scaled so that
	// reloc+write never exceeds 0.9 of the budget
	bool bInplaceEnable = hIndex.GetInt ( "inplace_enable", 0 )!=0;
	int iHitGap = hIndex.GetSize ( "inplace_hit_gap", 0 );
	float fRelocFactor = hIndex.GetFloat ( "inplace_reloc_factor", 0.1f );
	float fWriteFactor = hIndex.GetFloat ( "inplace_write_factor", 0.1f );
	if ( bInplaceEnable )
	{
		if ( fRelocFactor < 0.01f || fRelocFactor > 0.9f )
		{
			fprintf ( stdout, "WARNING: inplace_reloc_factor must be 0.01 to 0.9, clamped\n" );
			fRelocFactor = Min ( Max ( fRelocFactor, 0.01f ), 0.9f );
		}
		if ( fWriteFactor < 0.01f || fWriteFactor > 0.9f )
		{
			fprintf ( stdout, "WARNING: inplace_write_factor must be 0.01 to 0.9, clamped\n" );
			fWriteFactor = Min ( Max ( fWriteFactor, 0.01f ), 0.9f );
		}
		if ( fWriteFactor+fRelocFactor > 1.0f )
		{
			fprintf ( stdout, "WARNING: inplace_write_factor+inplace_reloc_factor must be less than 0.9, scaled\n" );
			float fScale = 0.9f/(fWriteFactor+fRelocFactor);
			fRelocFactor *= fScale;
			fWriteFactor *= fScale;
		}
	}
	/////////////////////
	// spawn datasources
	/////////////////////
	bool bHtmlStrip = false;
	CSphString sHtmlIndexAttrs, sHtmlRemoveElements;
	if ( hIndex("html_strip") )
	{
		bHtmlStrip = hIndex.GetInt ( "html_strip" )!=0;
		sHtmlIndexAttrs = hIndex.GetStr ( "html_index_attrs" );
		sHtmlRemoveElements = hIndex.GetStr ( "html_remove_elements" );
	} else
	{
		// index_sp/index_zones rely on the HTML stripper to detect boundaries
		if ( bIndexSP )
			sphWarning ( "table '%s': index_sp=1 requires html_strip=1 to index paragraphs", szIndexName );
		if ( hIndex("index_zones") )
			sphDie ( "table '%s': index_zones requires html_strip=1", szIndexName );
	}
	// parse all sources
	// a missing source section is only reported; a section that fails to spawn
	// aborts the whole table (after all sections were tried)
	CSphVector<CSphSource*> dSources;
	bool bSpawnFailed = false;
	for ( CSphVariant * pSourceName = hIndex("source"); pSourceName; pSourceName = pSourceName->m_pNext )
	{
		if ( !hSources ( pSourceName->cstr() ) )
		{
			fprintf ( stdout, "ERROR: table '%s': source '%s' not found.\n", szIndexName, pSourceName->cstr() );
			continue;
		}
		const CSphConfigSection & hSource = hSources [ pSourceName->cstr() ];
		CSphSource * pSource = SpawnSource ( hSource, pSourceName->cstr() );
		if ( !pSource )
		{
			bSpawnFailed = true;
			continue;
		}
		if ( bHtmlStrip )
		{
			if ( !pSource->SetStripHTML ( sHtmlIndexAttrs.cstr(), sHtmlRemoveElements.cstr(), bIndexSP, hIndex.GetStr("index_zones").cstr(), sError ) )
			{
				fprintf ( stdout, "ERROR: source '%s': %s.\n", pSourceName->cstr(), sError.cstr() );
				return false;
			}
		}
		pSource->SetTokenizer ( pTokenizer );
		if ( pFieldFilter )
			pSource->SetFieldFilter ( pFieldFilter->Clone() );
		pSource->SetDumpRows ( fpDumpRows );
		dSources.Add ( pSource );
	}
	if ( bSpawnFailed )
	{
		fprintf ( stdout, "ERROR: table '%s': failed to configure some of the sources, will not index.\n", szIndexName );
		return false;
	}
	if ( !dSources.GetLength() )
	{
		fprintf ( stdout, "ERROR: table '%s': no valid sources configured; skipping.\n", szIndexName );
		return false;
	}
	///////////
	// do work
	///////////
	int64_t tmTime = sphMicroTimer();
	bool bOK = false;
	if ( g_sBuildStops )
	{
		///////////////////
		// build stopwords
		///////////////////
		// --buildstops mode: iterate all documents through a counting dictionary
		// and dump the top-N most frequent words instead of building an index
		if ( !g_bQuiet )
		{
			fprintf ( stdout, "building stopwords list...\n" );
			fflush ( stdout );
		}
		CSphRefcountedPtr<CSphStopwordBuilderDict> tDict { new CSphStopwordBuilderDict };
		ARRAY_FOREACH ( i, dSources )
		{
			dSources[i]->SetDict ( (DictRefPtr_c)tDict );
			if ( !dSources[i]->Connect ( sError ) || !dSources[i]->IterateStart ( sError ) )
			{
				if ( !sError.IsEmpty() )
					fprintf ( stdout, "ERROR: table '%s': %s\n", szIndexName, sError.cstr() );
				continue;
			}
			bool bEOF = false;
			while ( dSources[i]->IterateDocument ( bEOF, sError ) && !bEOF )
			{
				while ( dSources[i]->IterateHits ( sError ) );
				if ( !sError.IsEmpty() )
				{
					fprintf ( stdout, "ERROR: table '%s': %s\n", szIndexName, sError.cstr() );
					sError = "";
				}
			}
			if ( !sError.IsEmpty() )
				fprintf ( stdout, "ERROR: table '%s': %s\n", szIndexName, sError.cstr() );
		}
		tDict->Save ( g_sBuildStops, g_iTopStops, g_bBuildFreqs );
		bOK = true;
	} else
	{
		//////////
		// index!
		//////////
		// if searchd is running, we want to reindex to .tmp files
		CSphString sIndexPath;
		sIndexPath.SetSprintf ( g_bRotate ? "%s.tmp" : "%s", hIndex["path"].cstr() );
		// do index
		auto pIndex = sphCreateIndexPhrase ( szIndexName, sIndexPath );
		assert ( pIndex );
		// check lock file
		if ( !pIndex->Lock() )
		{
			fprintf ( stdout, "FATAL: %s, will not index. Try --rotate option.\n", pIndex->GetLastError().cstr() );
			exit ( 1 );
		}
		if ( pDict->GetSettings().m_bWordDict && ( tSettings.m_dPrefixFields.GetLength() || tSettings.m_dInfixFields.GetLength() ) )
		{
			fprintf ( stdout, "WARNING: table '%s': prefix_fields and infix_fields has no effect with dict=keywords, ignoring\n", szIndexName );
		}
		if ( bInplaceEnable )
			pIndex->SetInplaceSettings ( iHitGap, fRelocFactor, fWriteFactor );
		pIndex->SetFieldFilter ( std::move ( pFieldFilter ) );
		pIndex->SetTokenizer ( pTokenizer );
		pIndex->SetDictionary ( pDict );
		// --keep-attrs: carry attribute values over from the previous index version
		if ( g_bKeepAttrs )
		{
			if ( g_sKeepAttrsPath.IsEmpty() )
				pIndex->SetKeepAttrs ( hIndex["path"].strval(), g_dKeepAttrs );
			else
				pIndex->SetKeepAttrs ( g_sKeepAttrsPath, g_dKeepAttrs );
		}
		pIndex->Setup ( tSettings );
		ConsoleIndexProgress_t tProgress;
		bOK = pIndex->Build ( dSources, g_iMemLimit, g_iWriteBuffer, tProgress )!=0;
		// with --rotate (and HUP enabled) hand the result over as '.new' for searchd pickup
		if ( bOK && g_bRotate && g_bSendHUP )
		{
			sIndexPath.SetSprintf ( "%s.new", hIndex["path"].cstr() );
			bOK = pIndex->Rename ( sIndexPath );
		}
		if ( !bOK )
			fprintf ( stdout, "ERROR: table '%s': %s.\n", szIndexName, pIndex->GetLastError().cstr() );
		if ( !pIndex->GetLastWarning().IsEmpty() )
			fprintf ( stdout, "WARNING: table '%s': %s.\n", szIndexName, pIndex->GetLastWarning().cstr() );
		pIndex->Unlock ();
	}
	// trip report
	tmTime = sphMicroTimer() - tmTime;
	if ( !g_bQuiet )
	{
		tmTime = Max ( tmTime, 1 );	// avoid division by zero below
		int64_t iTotalDocs = 0;
		int64_t iTotalBytes = 0;
		ARRAY_FOREACH ( i, dSources )
		{
			const CSphSourceStats & tSource = dSources[i]->GetStats();
			iTotalDocs += tSource.m_iTotalDocuments;
			iTotalBytes += tSource.m_iTotalBytes;
		}
		fprintf ( stdout, "total " INT64_FMT " docs, " INT64_FMT " bytes\n", iTotalDocs, iTotalBytes );
		fprintf ( stdout, "total %d.%03d sec, %d bytes/sec, %d.%02d docs/sec\n",
			(int)(tmTime/1000000), (int)(tmTime%1000000)/1000, // sec
			(int)(iTotalBytes*1000000/tmTime), // bytes/sec
			(int)(iTotalDocs*1000000/tmTime), (int)(iTotalDocs*1000000*100/tmTime)%100 ); // docs/sec
	}
	// cleanup and go on
	ARRAY_FOREACH ( i, dSources )
		SafeDelete ( dSources[i] );
	return bOK;
}
static bool RenameIndexFiles ( const char * szPath, const char * szName, CSphIndex * pIndex, bool bRotate )
{
StringBuilder_c sFrom, sTo;
sFrom << szPath << ".tmp";
sTo << szPath;
if ( bRotate )
sTo << ".new";
pIndex->SetFilebase ( sFrom.cstr() );
if ( !pIndex->Rename ( sTo.cstr() ) )
{
fprintf ( stdout, "ERROR: table '%s': failed to rename '%s' to '%s': %s", szName, sFrom.cstr(), sTo.cstr(), pIndex->GetLastError().cstr() );
return false;
}
return true;
}
//////////////////////////////////////////////////////////////////////////
// MERGING
//////////////////////////////////////////////////////////////////////////
/// Merge table 'sSrc' into table 'sDst' on disk.
/// @param hDst    destination table config section (must contain 'path')
/// @param sDst    destination table name
/// @param hSrc    source table config section (must contain 'path')
/// @param sSrc    source table name
/// @param tPurge  filters applied to dst rows during merge (--merge-dst-range);
///                may be extended here with src's kill-list against dst
/// @param bRotate rename results to '.new' for searchd pickup instead of in place
/// @param bDropSrc also empty the source table after a successful merge
/// @return true on success; merge failures die via sphDie()
bool DoMerge ( const CSphConfigSection & hDst, const char * sDst, const CSphConfigSection & hSrc, const char * sSrc, CSphVector<CSphFilterSettings> & tPurge, bool bRotate, bool bDropSrc )
{
	// progress bar
	if ( !g_bQuiet )
	{
		fprintf ( stdout, "merging table '%s' into table '%s'...\n", sSrc, sDst );
		fflush ( stdout );
	}
	// check config
	if ( !hDst("path") )
	{
		fprintf ( stdout, "ERROR: table '%s': key 'path' not found.\n", sDst );
		return false;
	}
	if ( !hSrc("path") )
	{
		fprintf ( stdout, "ERROR: table '%s': key 'path' not found.\n", sSrc );
		return false;
	}
	// do the merge
	auto pSrc = sphCreateIndexPhrase ( "", hSrc["path"].strval() );
	auto pDst = sphCreateIndexPhrase ( "", hDst["path"].strval() );
	assert ( pSrc );
	assert ( pDst );
	// fix up both indexes' settings from config; warnings are non-fatal
	{
		StrVec_t dWarnings;
		CSphString sError;
		if ( !sphFixupIndexSettings ( pSrc.get(), hSrc, false, nullptr, dWarnings, sError ) )
		{
			fprintf ( stdout, "ERROR: table '%s': %s\n", sSrc, sError.cstr () );
			return false;
		}
		for ( const auto & i : dWarnings )
			fprintf ( stdout, "WARNING: table '%s': %s\n", sSrc, i.cstr() );
	}
	{
		StrVec_t dWarnings;
		CSphString sError;
		if ( !sphFixupIndexSettings ( pDst.get(), hDst, false, nullptr, dWarnings, sError ) )
		{
			fprintf ( stdout, "ERROR: table '%s': %s\n", sDst, sError.cstr () );
			return false;
		}
		for ( const auto & i : dWarnings )
			fprintf ( stdout, "WARNING: table '%s': %s\n", sDst, i.cstr() );
	}
	// without --rotate we work on the live files, so take both index locks;
	// with --rotate, searchd still owns them and we write to .tmp/.new instead
	if ( !bRotate )
	{
		if ( !pSrc->Lock() )
		{
			fprintf ( stdout, "ERROR: table '%s' is already locked; lock: %s\n", sSrc, pSrc->GetLastError().cstr() );
			return false;
		}
		if ( !pDst->Lock() )
		{
			fprintf ( stdout, "ERROR: table '%s' is already locked; lock: %s\n", sDst, pDst->GetLastError().cstr() );
			return false;
		}
	}
	// if src index has dst index as its killlist_target, we should use this killlist
	CSphFixedVector<DocID_t> dKillList(0);
	KillListTargets_c tTargets;
	CSphString sError;
	if ( !pSrc->LoadKillList ( &dKillList, tTargets, sError ) )
	{
		fprintf ( stdout, "ERROR: %s\n", sError.cstr() );
		return false;
	}
	if ( dKillList.GetLength() )
	{
		// turn src's kill-list into an exclusion filter on dst doc ids
		for ( const auto & tTarget : tTargets.m_dTargets )
			if ( tTarget.m_sIndex==sDst )
			{
				if ( tTarget.m_uFlags & KillListTarget_t::USE_KLIST )
				{
					CSphFilterSettings & dLast = tPurge.Add();
					dLast.m_eType = SPH_FILTER_VALUES;
					dLast.m_bExclude = true;
					dLast.m_sAttrName = sphGetDocidName();
					dLast.SetExternalValues ( dKillList );
				}
				break;
			}
	}
	ConsoleIndexProgress_t tProgress;
	int64_t tmMergeTime = sphMicroTimer();
	{
		if ( !pDst->Merge ( pSrc.get(), tPurge, true, tProgress ) )
			sphDie ( "failed to merge table '%s' into table '%s': %s", sSrc, sDst, pDst->GetLastError().cstr() );
		if ( !pDst->GetLastWarning().IsEmpty() )
			fprintf ( stdout, "WARNING: table '%s': %s\n", sDst, pDst->GetLastWarning().cstr() );
	}
	if ( bDropSrc )
	{
		// NOTE(review): merging src into itself with no filters appears to be the
		// mechanism that empties it — confirm against CSphIndex::Merge semantics
		if ( !pSrc->Merge ( pSrc.get(), {}, true, tProgress ) )
			sphDie ( "failed to drop table '%s' : %s", sSrc, pSrc->GetLastError().cstr() );
		if ( !pSrc->GetLastWarning().IsEmpty() )
			fprintf ( stdout, "WARNING: table '%s': %s\n", sSrc, pSrc->GetLastWarning().cstr() );
		// write klist with targets but without klist itself
		// that will affect the order of index load on rotation, but no actual klist will be applied
		CSphString sSrcKlist = pSrc->GetTmpFilename ( SPH_EXT_SPK );
		if ( !WriteKillList ( sSrcKlist, nullptr, 0, tTargets, sError ) )
			sphDie ( "failed to modify klist target in table '%s': %s", sSrc, sError.cstr() );
	}
	tmMergeTime = sphMicroTimer() - tmMergeTime;
	if ( !g_bQuiet )
		printf ( "merged in %d.%03d sec\n", (int)(tmMergeTime/1000000), (int)(tmMergeTime%1000000)/1000 );
	// need to close attribute files that was mapped with RW access to unlink and rename them on windows
	pSrc->Dealloc();
	pSrc->Unlock();
	pDst->Dealloc();
	pDst->Unlock();
	// pick up merge result
	if ( !RenameIndexFiles ( hDst["path"].cstr(), sDst, pDst.get(), bRotate ) )
		return false;
	if ( bDropSrc && !RenameIndexFiles ( hSrc["path"].cstr(), sSrc, pSrc.get(), bRotate ) )
		return false;
	return true;
}
//////////////////////////////////////////////////////////////////////////
// ENTRY
//////////////////////////////////////////////////////////////////////////
/// Print aggregate I/O statistics (call count, total time, per-call averages)
/// to stdout. With zero calls the averages are reported as 0.0 to avoid
/// dividing by zero.
void ReportIOStats ( const char * sType, int iReads, int64_t iReadTime, int64_t iReadBytes )
{
	int iSec = (int)( iReadTime/1000000 );
	int iMsecFrac = (int)( iReadTime%1000000 )/1000;
	if ( !iReads )
	{
		fprintf ( stdout, "total %d %s, %d.%03d sec, 0.0 kb/call avg, 0.0 msec/call avg\n",
			iReads, sType, iSec, iMsecFrac );
		return;
	}
	// per-call averages, printed with one fractional digit
	int64_t iBytesPerCall = iReadBytes / iReads;
	int64_t tmPerCall = iReadTime / iReads;
	fprintf ( stdout, "total %d %s, %d.%03d sec, %d.%d kb/call avg, %d.%d msec/call avg\n",
		iReads, sType, iSec, iMsecFrac,
		(int)( iBytesPerCall/1024 ), (int)( iBytesPerCall%1024 )*10/1024,
		(int)( tmPerCall/1000 ), (int)( tmPerCall/100 )%10 );
}
extern int64_t g_iIndexerCurrentDocID;
extern int64_t g_iIndexerCurrentHits;
extern int64_t g_iIndexerCurrentRangeMin;
extern int64_t g_iIndexerCurrentRangeMax;
extern int64_t g_iIndexerPoolStartDocID;
extern int64_t g_iIndexerPoolStartHit;
#if !_WIN32
/// POSIX crash handler: prints a crash report (banner, current indexing
/// position, backtrace) to stderr and exits via CRASH_EXIT.
/// Uses sphSafeInfo, which writes via fd to stay async-signal-safe;
/// "%l" is presumably its custom 64-bit specifier — project convention, not printf.
void sigsegv ( int sig )
{
	sphSafeInfo ( STDERR_FILENO, "*** Oops, indexer crashed! Please send the following report to developers." );
	sphSafeInfo ( STDERR_FILENO, g_sBannerVersion.cstr() );
	sphSafeInfo ( STDERR_FILENO, "-------------- report begins here ---------------" );
	// globals updated by the indexing loop; show where we were when we crashed
	sphSafeInfo ( STDERR_FILENO, "Current document: docid=%l, hits=%l", g_iIndexerCurrentDocID, g_iIndexerCurrentHits );
	sphSafeInfo ( STDERR_FILENO, "Current batch: minid=%l, maxid=%l", g_iIndexerCurrentRangeMin, g_iIndexerCurrentRangeMax );
	sphSafeInfo ( STDERR_FILENO, "Hit pool start: docid=%l, hit=%l", g_iIndexerPoolStartDocID, g_iIndexerPoolStartHit );
	sphBacktrace ( STDERR_FILENO );
	CRASH_EXIT;
}
/// Install the crash handler (sigsegv above) for all fatal signals.
/// SA_RESETHAND makes each handler one-shot, so a crash inside the
/// handler itself falls back to the default action.
void SetSignalHandlers ()
{
	struct sigaction sa;
	sigfillset ( &sa.sa_mask );		// block everything while the handler runs
	sa.sa_flags = SA_RESETHAND;
	sa.sa_handler = sigsegv;
	const int dCrashSigs[] = { SIGSEGV, SIGBUS, SIGABRT, SIGILL, SIGFPE };
	for ( int iSig : dCrashSigs )
		if ( sigaction ( iSig, &sa, nullptr )!=0 )
		{
			fprintf ( stderr, "sigaction(): %s", strerrorm(errno) );
			exit ( 1 );
		}
}
#else // if _WIN32
/// Windows crash handler: writes a minidump via sphBacktrace(), then prints
/// a short notice (using raw ::write, safe in this context) and exits.
LONG WINAPI sigsegv ( EXCEPTION_POINTERS * pExc )
{
	const char * sFail1 = "*** Oops, indexer crashed! Please send ";
	const char * sFail2 = " minidump file to developers.\n";
	const char * sFailVer = g_sBannerVersion.cstr();
	// g_sMinidump path was prepared in SetSignalHandlers()
	sphBacktrace ( pExc, g_sMinidump );
	::write ( STDERR_FILENO, sFail1, (unsigned int) strlen ( sFail1 ) );
	::write ( STDERR_FILENO, g_sMinidump, (unsigned int) strlen(g_sMinidump) );
	::write ( STDERR_FILENO, sFail2, (unsigned int) strlen ( sFail2 ) );
	::write ( STDERR_FILENO, sFailVer, (unsigned int) strlen ( sFailVer ) );
	::write ( STDERR_FILENO, "\n", 1);
	CRASH_EXIT;
}
/// Windows: prepare a per-process minidump filename and route unhandled
/// SEH exceptions to the crash handler above.
void SetSignalHandlers ()
{
	snprintf ( g_sMinidump, sizeof(g_sMinidump), "indexer.%d.mdmp", GetCurrentProcessId() );
	SetUnhandledExceptionFilter ( sigsegv );
}
#endif // _WIN32
/// Ask a running searchd to rotate (pick up freshly built tables).
/// Reads searchd's PID from the configured pid_file, then signals it:
/// SIGHUP on POSIX, a one-byte write into searchd's named pipe on Windows.
/// @param bForce send even when --sighup-each is off (i.e. the final rotate)
/// @return true if the notification was delivered
bool SendRotate ( const CSphConfig & hConf, bool bForce )
{
	// only act with --rotate, and either per-table (--sighup-each) or when forced
	if ( !( g_bRotate && ( g_bRotateEach || bForce ) ) )
		return false;
	int iPID = -1;
	// load config
	if ( !hConf.Exists ( "searchd" ) )
	{
		fprintf ( stdout, "WARNING: 'searchd' section not found in config file.\n" );
		return false;
	}
	const CSphConfigSection & hSearchd = hConf["searchd"]["searchd"];
	if ( !hSearchd.Exists ( "pid_file" ) )
	{
		fprintf ( stdout, "WARNING: 'pid_file' parameter not found in 'searchd' config section.\n" );
		return false;
	}
	CSphString sPidFile = hSearchd["pid_file"].cstr();
	// read in PID
	FILE * fp = fopen ( sPidFile.cstr(), "r" );
	if ( !fp )
	{
		fprintf ( stdout, "WARNING: failed to open pid_file '%s'.\n", sPidFile.cstr() );
		return false;
	}
	if ( fscanf ( fp, "%d", &iPID )!=1 || iPID<=0 )
	{
		fprintf ( stdout, "WARNING: failed to scanf pid from pid_file '%s'.\n", sPidFile.cstr() );
		fclose ( fp );
		return false;
	}
	fclose ( fp );
#if _WIN32
	// Windows has no SIGHUP: connect to searchd's control pipe and write one byte.
	// WaitNamedPipe handles the pipe-busy case (another client connected).
	char szPipeName[64];
	snprintf ( szPipeName, sizeof(szPipeName), "\\\\.\\pipe\\searchd_%d", iPID );
	HANDLE hPipe = INVALID_HANDLE_VALUE;
	while ( hPipe==INVALID_HANDLE_VALUE )
	{
		hPipe = CreateFile ( szPipeName, GENERIC_WRITE, 0, NULL, OPEN_EXISTING, 0, NULL );
		if ( hPipe==INVALID_HANDLE_VALUE )
		{
			if ( GetLastError()!=ERROR_PIPE_BUSY )
			{
				fprintf ( stdout, "WARNING: could not open pipe (GetLastError()=%d)\n", GetLastError () );
				return false;
			}
			if ( !WaitNamedPipe ( szPipeName, 1000 ) )
			{
				fprintf ( stdout, "WARNING: could not open pipe (GetLastError()=%d)\n", GetLastError () );
				return false;
			}
		}
	}
	if ( hPipe!=INVALID_HANDLE_VALUE )
	{
		DWORD uWritten = 0;
		BYTE uWrite = 0;
		BOOL bResult = WriteFile ( hPipe, &uWrite, 1, &uWritten, NULL );
		if ( bResult )
			fprintf ( stdout, "rotating tables: successfully sent SIGHUP to searchd (pid=%d).\n", iPID );
		else
			fprintf ( stdout, "WARNING: failed to send SIGHUP to searchd (pid=%d, GetLastError()=%d)\n", iPID, GetLastError () );
		CloseHandle ( hPipe );
	}
#else
	// signal
	int iErr = kill ( iPID, SIGHUP );
	if ( iErr==0 )
	{
		if ( !g_bQuiet )
			fprintf ( stdout, "rotating tables: successfully sent SIGHUP to searchd (pid=%d).\n", iPID );
	} else
	{
		switch ( errno )
		{
			case ESRCH: fprintf ( stdout, "WARNING: no process found by PID %d.\n", iPID ); break;
			case EPERM: fprintf ( stdout, "WARNING: access denied to PID %d.\n", iPID ); break;
			default: fprintf ( stdout, "WARNING: kill() error: %s.\n", strerrorm(errno) ); break;
		}
		return false;
	}
#endif
	// all ok
	return true;
}
static void MakeVersion()
{
const char * szColumnarVer = GetColumnarVersionStr();
CSphString sColumnar = "";
if ( szColumnarVer )
sColumnar.SetSprintf ( " (columnar %s)", szColumnarVer );
const char * szSiVer = GetSecondaryVersionStr();
CSphString sSi = "";
if ( szSiVer )
sSi.SetSprintf ( " (secondary %s)", szSiVer );
const char * szKNNVer = GetKNNVersionStr();
CSphString sKNN = "";
if ( szKNNVer )
sKNN.SetSprintf ( " (knn %s)", szKNNVer );
g_sBannerVersion.SetSprintf ( "%s%s%s%s", szMANTICORE_NAME, sColumnar.cstr(), sSi.cstr(), sKNN.cstr() );
}
/// Print the version banner (as composed by MakeVersion) plus the banner text.
static void ShowVersion()
{
	fprintf ( stdout, "%s", g_sBannerVersion.cstr() );
	fprintf ( stdout, "%s", szMANTICORE_BANNER_TEXT );
}
// Example of the build-info line printed below: "Built on Linux x86_64 by GNU 8.3.1 compiler."
/// Print usage help to stdout: build info (OS/compiler/configure flags are
/// compile-time macros, so absent pieces are simply omitted) and the full
/// command-line option reference.
static void ShowHelp ()
{
	fprintf ( stdout,
		"Built"
#ifdef OS_UNAME
		" on " OS_UNAME
#endif
#ifdef COMPILER
		" by " COMPILER " compiler"
#endif
		".\n\n"
#ifdef CONFIGURE_FLAGS
		CONFIGURE_FLAGS "\n\n"
#endif
		"Usage: indexer [OPTIONS] [tablename1 [tablename2 [...]]]\n"
		"\n"
		"Options are:\n"
		"-h, --help\t\tdisplay this help message\n"
		"-v, --version\t\tdisplay version information\n"
		"--config <file>\t\tread configuration from specified file\n"
		"\t\t\t(default is manticore.conf)\n"
		"--all\t\t\tbuild all configured tables\n"
		"--quiet\t\t\tbe quiet, only print errors\n"
		"--noprogress\t\tdo not display progress\n"
		"\t\t\t(automatically on if output is not to a tty)\n"
		"--rotate\t\tsend SIGHUP to searchd when building is over\n"
		"\t\t\tto rotate updated tables automatically\n"
		"--sighup-each\t\tsend SIGHUP to searchd after each table\n"
		"\t\t\t(used with --rotate only)\n"
		"--buildstops <output.txt> <N>\n"
		"\t\t\tbuild top N stopwords and write them to given file\n"
		"--buildfreqs\t\tstore words frequencies to output.txt\n"
		"\t\t\t(used with --buildstops only)\n"
		"--merge <dst-table> <src-table>\n"
		"\t\t\tmerge 'src-table' into 'dst-table'\n"
		"\t\t\t'dst-table' will receive merge result\n"
		"\t\t\t'src-table' will not be modified\n"
		"--merge-dst-range <attr> <min> <max>\n"
		"\t\t\tfilter 'dst-table' on merge, keep only those documents\n"
		"\t\t\twhere 'attr' is between 'min' and 'max' (inclusive)\n"
		"--drop-src\t\tclears src table after merge\n"
		"--dump-rows <FILE>\tdump fetched rows into FILE\n"
		"--print-queries\t\tprint SQL queries (for debugging)\n"
		"--print-rt\t\tprint processed rows as SQL insert commands and field mapping info for populating an RT table\n"
		"--keep-attrs\t\tretain attributes from the old table\n"
		"\n"
		"Examples:\n"
		"indexer --quiet myidx1\tbuild 'myidx1' defined in 'manticore.conf'\n"
		"indexer --all\t\tbuild all tables defined in 'manticore.conf'\n" );
}
int main ( int argc, char ** argv )
{
CSphString sOptConfig;
bool bMerge = false;
CSphVector<CSphFilterSettings> dMergeDstFilters;
CSphVector<const char *> dIndexes;
CSphVector<const char *> dWildIndexes;
bool bIndexAll = false;
bool bDropSrc = false;
CSphString sDumpRows;
#if _WIN32
CheckWinInstall();
#endif
CSphString sError, sErrorSI, sErrorKNN;
bool bColumnarError = !InitColumnar ( sError );
bool bSecondaryError = !InitSecondary ( sErrorSI );
bool bKNNError = !InitKNN ( sErrorKNN );
MakeVersion();
if ( argc==2 && ( !strcmp ( argv[1], "--help" ) || !strcmp ( argv[1], "-h" )))
{
ShowHelp();
return 0;
}
if ( argc==2 && ( !strcmp ( argv[1], "--version" ) || !strcmp ( argv[1], "-v" )))
{
ShowVersion();
return 0;
}
int i;
for ( i=1; i<argc; i++ )
{
if ( ( !strcmp ( argv[i], "--config" ) || !strcmp ( argv[i], "-c" ) ) && (i+1)<argc )
{
sOptConfig = argv[++i];
if ( !sphIsReadable ( sOptConfig ) )
sphDie ( "config file '%s' does not exist or is not readable", sOptConfig.cstr() );
} else if ( strcasecmp ( argv[i], "--merge" )==0 && (i+2)<argc )
{
bMerge = true;
dIndexes.Add ( argv[i+1] );
dIndexes.Add ( argv[i+2] );
i += 2;
} else if ( bMerge && strcasecmp ( argv[i], "--merge-dst-range" )==0 && (i+3)<argc )
{
CSphFilterSettings& dLast = dMergeDstFilters.Add();
dLast.m_eType = SPH_FILTER_RANGE;
dLast.m_sAttrName = argv[i+1];
dLast.m_iMinValue = strtoll ( argv[i+2], NULL, 10 );
dLast.m_iMaxValue = strtoll ( argv[i+3], NULL, 10 );
i += 3;
} else if ( strcasecmp ( argv[i], "--buildstops" )==0 && (i+2)<argc )
{
g_sBuildStops = argv[i+1];
g_iTopStops = atoi ( argv[i+2] );
if ( g_iTopStops<=0 )
break;
i += 2;
} else if ( strcasecmp ( argv[i], "--rotate" )==0 )
{
g_bRotate = true;
} else if ( strcasecmp ( argv[i], "--sighup-each" )==0 )
{
g_bRotateEach = true;
} else if ( strcasecmp ( argv[i], "--nohup" )==0 )
{
g_bSendHUP = false;
} else if ( strcasecmp ( argv[i], "--buildfreqs" )==0 )
{
g_bBuildFreqs = true;
} else if ( strcasecmp ( argv[i], "--quiet" )==0 )
{
g_bQuiet = true;
} else if ( strcasecmp ( argv[i], "--noprogress" )==0 )
{
g_bProgress = false;
} else if ( strcasecmp ( argv[i], "--all" )==0 )
{
bIndexAll = true;
} else if ( strcasecmp ( argv[i], "--verbose" )==0 ) // just to prevent warning about unknow option
{
} else if ( isalnum ( argv[i][0] ) || argv[i][0]=='_' || sphIsWild ( argv[i][0] ) )
{
bool bHasWilds = false;
const char * s = argv[i];
while ( *s )
{
if ( sphIsWild(*s) )
{
bHasWilds = true;
break;
}
s++;
}
if ( bHasWilds )
dWildIndexes.Add ( argv[i] );
else
dIndexes.Add ( argv[i] );
} else if ( strcasecmp ( argv[i], "--drop-src" )==0 )
{
bDropSrc = true;
} else if ( strcasecmp ( argv[i], "--dump-rows" )==0 && (i+1)<argc )
{
sDumpRows = argv[++i];
} else if ( strcasecmp ( argv[i], "--print-queries" )==0 )
{
g_bPrintQueries = true;
} else if ( strcasecmp ( argv[i], "--print-rt" )==0 )
{
g_bPrintRTQueries = true;
g_bProgress = false;
g_bQuiet = true;
if((i+1)<argc)
{
g_sDumpRtIndex = argv[++i];
}else{
break;
}
} else if ( strncasecmp ( argv[i], "--keep-attrs", 12 )==0 )
{
CSphString sArg ( argv[i] );
if ( sArg.Begins ( "--keep-attrs=" ) )
{
int iKeyLen = sizeof ( "--keep-attrs=" )-1;
g_sKeepAttrsPath = sArg.cstr() + iKeyLen;
}
if ( sArg.Begins ( "--keep-attrs-names=" ) )
{
int iKeyLen = sizeof ( "--keep-attrs-names=" )-1;
sphSplit ( g_dKeepAttrs, sArg.cstr() + iKeyLen, "," );
}
g_bKeepAttrs = true;
} else
break;
}
if ( !g_bQuiet )
ShowVersion();
if ( bColumnarError )
sphWarning ( "Error initializing columnar storage: %s", sError.cstr() );
if ( bSecondaryError )
sphWarning ( "Error initializing secondary index: %s", sErrorSI.cstr() );
if ( bKNNError )
sphWarning ( "Error initializing knn index: %s", sErrorKNN.cstr() );
if ( !isatty ( fileno(stdout) ) )
g_bProgress = false;
if ( i!=argc || argc<2 )
{
if ( argc>1 )
{
fprintf ( stdout, "ERROR: malformed or unknown option near '%s'.\n", argv[i] );
} else
ShowHelp();
return 1;
}
if ( !bMerge && !bIndexAll && !dIndexes.GetLength() && !dWildIndexes.GetLength() )
{
fprintf ( stdout, "ERROR: nothing to do.\n" );
return 1;
}
sphBacktraceSetBinaryName ( argv[0] );
SetSignalHandlers();
///////////////
// load config
///////////////
if ( !sphInitCharsetAliasTable ( sError ) )
sphDie ( "failed to init charset alias table: %s", sError.cstr() );
sphCollationInit ();
SetupLemmatizerBase();
auto hConf = sphLoadConfig ( sOptConfig, !g_bQuiet, sOptConfig );
if ( !hConf ( "source" ) )
sphDie ( "no tables found in config file '%s'", sOptConfig.cstr() );
sphCheckDuplicatePaths ( hConf );
if ( hConf("indexer") && hConf["indexer"]("indexer") )
{
CSphConfigSection & hIndexer = hConf["indexer"]["indexer"];
g_iMemLimit = hIndexer.GetSize ( "mem_limit", g_iMemLimit );
g_iMaxXmlpipe2Field = hIndexer.GetSize ( "max_xmlpipe2_field", g_iMaxXmlpipe2Field );
g_iWriteBuffer = hIndexer.GetSize ( "write_buffer", g_iWriteBuffer );
g_iMaxFileFieldBuffer = Max ( 1024*1024, hIndexer.GetSize ( "max_file_field_buffer", g_iMaxFileFieldBuffer ) );
g_bIgnoreNonPlain = hIndexer.GetBool( "ignore_non_plain", g_bIgnoreNonPlain);
if ( hIndexer("on_file_field_error") )
{
const CSphString & sVal = hIndexer["on_file_field_error"].strval();
if ( sVal=="ignore_field" )
g_eOnFileFieldError = FFE_IGNORE_FIELD;
else if ( sVal=="skip_document" )
g_eOnFileFieldError = FFE_SKIP_DOCUMENT;
else if ( sVal=="fail_index" )
g_eOnFileFieldError = FFE_FAIL_INDEX;
else
sphDie ( "unknown on_field_field_error value (must be one of ignore_field, skip_document, fail_index)" );
}
bool bJsonStrict = g_bJsonStrict;
bool bJsonKeynamesToLowercase = g_bJsonKeynamesToLowercase;
if ( hIndexer("on_json_attr_error") )
{
const CSphString & sVal = hIndexer["on_json_attr_error"].strval();
if ( sVal=="ignore_attr" )
bJsonStrict = false;
else if ( sVal=="fail_index" )
bJsonStrict = true;
else
sphDie ( "unknown on_json_attr_error value (must be one of ignore_attr, fail_index)" );
}
if ( hIndexer("json_autoconv_keynames") )
{
const CSphString & sVal = hIndexer["json_autoconv_keynames"].strval();
if ( sVal=="lowercase" )
bJsonKeynamesToLowercase = true;
else
sphDie ( "unknown json_autoconv_keynames value (must be 'lowercase')" );
}
bool bJsonAutoconvNumbers = ( hIndexer.GetInt ( "json_autoconv_numbers", 0 )!=0 );
sphSetJsonOptions ( bJsonStrict, bJsonAutoconvNumbers, bJsonKeynamesToLowercase );
sphSetThrottling ( hIndexer.GetInt ( "max_iops", 0 ), hIndexer.GetSize ( "max_iosize", 0 ) );
sphAotSetCacheSize ( hIndexer.GetSize ( "lemmatizer_cache", 262144 ) );
}
sphConfigureCommon ( hConf );
// FIXME!!! move to common
if ( hConf ( "searchd" ) && hConf["searchd"]("searchd") && hConf["searchd"]["searchd"] ( "collation_server" ) )
{
CSphString sCollation = hConf["searchd"]["searchd"].GetStr ( "collation_server" );
GlobalCollation () = sphCollationFromName ( sCollation, &sError );
if ( !sError.IsEmpty() )
sphWarning ( "%s", sError.cstr() );
}
/////////////////////
// index each index
////////////////////
FILE * fpDumpRows = NULL;
if ( !bMerge && !sDumpRows.IsEmpty() )
{
fpDumpRows = fopen ( sDumpRows.cstr(), "wb+" );
if ( !fpDumpRows )
sphDie ( "failed to open %s: %s", sDumpRows.cstr(), strerrorm(errno) );
}
for ( auto& tIndex : hConf["index"] )
{
for ( const auto& tWildIndex : dWildIndexes )
{
if ( sphWildcardMatch ( tIndex.first.cstr(), tWildIndex ) )
{
dIndexes.Add ( tIndex.first.cstr() );
// do not add index twice
break;
}
}
}
sphInitIOStats ();
CSphIOStats tIO;
tIO.Start();
int iIndexed = 0;
int iFailed = 0;
if ( bMerge )
{
if ( dIndexes.GetLength()!=2 )
sphDie ( "there must be 2 tables to merge specified" );
if ( !hConf["index"](dIndexes[0]) )
sphDie ( "no merge destination table '%s'", dIndexes[0] );
if ( !hConf["index"](dIndexes[1]) )
sphDie ( "no merge source table '%s'", dIndexes[1] );
bool bLastOk = DoMerge (
hConf["index"][dIndexes[0]], dIndexes[0],
hConf["index"][dIndexes[1]], dIndexes[1], dMergeDstFilters, g_bRotate, bDropSrc );
if ( bLastOk )
iIndexed++;
else
iFailed++;
} else if ( bIndexAll )
{
uint64_t tmRotated = sphMicroTimer();
for ( const auto& tIndex : hConf["index"] )
{
bool bLastOk = DoIndex ( tIndex.second, tIndex.first.cstr(), hConf["source"], fpDumpRows );
if ( bLastOk && ( sphMicroTimer() - tmRotated > ROTATE_MIN_INTERVAL ) && g_bSendHUP && SendRotate ( hConf, false ) )
tmRotated = sphMicroTimer();
if ( bLastOk )
iIndexed++;
}
} else
{
uint64_t tmRotated = sphMicroTimer();
ARRAY_FOREACH ( j, dIndexes )
{
if ( !hConf["index"](dIndexes[j]) )
fprintf ( stdout, "WARNING: no such table '%s', skipping.\n", dIndexes[j] );
else
{
bool bLastOk = DoIndex ( hConf["index"][dIndexes[j]], dIndexes[j], hConf["source"], fpDumpRows);
if ( bLastOk && ( sphMicroTimer() - tmRotated > ROTATE_MIN_INTERVAL ) && g_bSendHUP && SendRotate ( hConf, false ) )
tmRotated = sphMicroTimer();
if ( bLastOk )
iIndexed++;
else
iFailed++;
}
}
}
sphShutdownWordforms ();
ShutdownColumnar();
ShutdownSecondary();
ShutdownKNN();
if ( !g_bQuiet )
{
ReportIOStats ( "reads", tIO.m_iReadOps, tIO.m_iReadTime, tIO.m_iReadBytes );
ReportIOStats ( "writes", tIO.m_iWriteOps, tIO.m_iWriteTime, tIO.m_iWriteBytes );
}
tIO.Stop();
sphDoneIOStats();
////////////////////////////
// rotating searchd indices
////////////////////////////
// documentation stated
// 0, everything went ok
// 1, there was a problem while indexing (and if --rotate was specified, it was skipped)
// 2, indexing went ok, but --rotate attempt failed
bool bIndexedOk = ( iIndexed>0 && iFailed==0 ); // if all indexes are ok
int iExitCode = bIndexedOk ? 0 : 1;
if ( bIndexedOk && g_bRotate && g_bSendHUP )
{
if ( !SendRotate ( hConf, true ) )
{
fprintf ( stdout, "WARNING: tables NOT rotated.\n" );
iExitCode = 2;
}
}
#if SPH_DEBUG_LEAKS
sphAllocsStats ();
#endif
return iExitCode;
}
| 61,583
|
C++
|
.cpp
| 1,773
| 31.91709
| 213
| 0.65619
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
16,927
|
sphinxql_second.cpp
|
manticoresoftware_manticoresearch/src/sphinxql_second.cpp
|
//
// Copyright (c) 2021-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org
//
#include "sphinxql_second.h"
// Span of raw bytes inside the parsed query buffer (SqlSecondParser_c::m_pBuf).
// Used instead of copying substrings during parsing; materialized into a
// CSphString via SqlSecondParser_c::StrFromBlob().
struct BlobLocator_t
{
	int m_iStart;	// byte offset of the span start within the query buffer
	int m_iLen;		// span length in bytes
};
// Parser driver for the "second" SphinxQL grammar (flexsphinxql_second /
// bissphinxql_second). Holds the statement vector being filled and the shared
// parser state inherited from SqlParserTraits_c.
class SqlSecondParser_c : public SqlParserTraits_c
{
public:
	SqlSecondParser_c ( CSphVector<SqlStmt_t>& dStmt, const char* szQuery, CSphString* pError)
		: SqlParserTraits_c ( dStmt, szQuery, pError )
	{
		// if the caller already has statements, continue filling the last one;
		// otherwise start a fresh statement
		if ( m_dStmt.IsEmpty() )
			PushQuery();
		else
			m_pStmt = &m_dStmt.Last();
		m_sErrorHeader = "P02:"; // prefix identifying this parser pass in error messages
	}

	// Materialize a string from a locator pointing into the query buffer.
	CSphString StrFromBlob ( BlobLocator_t tStr ) const
	{
		CSphString sResult;
		sResult.SetBinary(m_pBuf+tStr.m_iStart, tStr.m_iLen);
		return sResult;
	}

	// Turn the current statement into a SET statement (option name + scope).
	void SetStatement ( const SqlNode_t & tName, SqlSet_e eSet );
	// Same, additionally copying a list of integer values into the statement.
	void SetStatement ( const SqlNode_t & tName, SqlSet_e eSet, const RefcountedVector_c<AttrValue_t> & dValues );
};
void SqlSecondParser_c::SetStatement ( const SqlNode_t& tName, SqlSet_e eSet )
{
m_pStmt->m_eStmt = STMT_SET;
m_pStmt->m_eSet = eSet;
ToString ( m_pStmt->m_sSetName, tName );
}
void SqlSecondParser_c::SetStatement ( const SqlNode_t & tName, SqlSet_e eSet, const RefcountedVector_c<AttrValue_t> & dValues )
{
SetStatement ( tName, eSet );
auto & dSV = m_pStmt->m_dSetValues;
dSV.Resize ( dValues.GetLength() );
ARRAY_FOREACH ( i, dValues )
dSV[i] = dValues[i].m_iValue;
}
#define YYSTYPE SqlNode_t
// unused parameter, simply to avoid type clash between all my yylex() functions
#define YY_DECL inline int flex_secondparser ( YYSTYPE* lvalp, void* yyscanner, SqlSecondParser_c* pParser )
#include "flexsphinxql_second.c"
// bison error callback: undo the NUL byte flex wrote at the last token
// boundary (so the full query text is intact for error reporting), then
// route the message through the common error-reporting path.
static void yyerror ( SqlParserTraits_c* pParser, const char* szMessage )
{
	// flex put a zero at last token boundary; make it undo that
	yy5lex_unhold ( pParser->m_pScanner );
	pParser->ProcessParsingError ( szMessage );
}
#ifndef NDEBUG
// using a proxy to be possible to debug inside yylex
inline int yylex ( YYSTYPE * lvalp, SqlSecondParser_c * pParser )
{
	// local variable gives the debugger a place to inspect the token id
	int res = flex_secondparser ( lvalp, pParser->m_pScanner, pParser );
	return res;
}
#else
// release build: forward straight to the generated lexer
inline int yylex ( YYSTYPE * lvalp, SqlSecondParser_c * pParser )
{
	return flex_secondparser ( lvalp, pParser->m_pScanner, pParser );
}
#endif
#include "bissphinxql_second.c"
// Parse sQuery with the "second" SphinxQL grammar, appending parsed
// statements to dStmt. Returns PARSE_OK only if parsing succeeded AND at
// least one statement was produced.
// NOTE: writes two NUL bytes just past the query text, as flex
// yy_scan_buffer() requires; the buffer is expected to have that slack
// (CSphString allocates a small gap past the end).
ParseResult_e ParseSecond ( Str_t sQuery, CSphVector<SqlStmt_t>& dStmt, CSphString& sError )
{
	assert ( IsFilled ( sQuery ) );

	SqlSecondParser_c tParser ( dStmt, sQuery.first, &sError );
	tParser.m_pBuf = sQuery.first;

	char * sEnd = const_cast<char *>( sQuery.first+sQuery.second );
	sEnd[0] = 0; // prepare for yy_scan_buffer
	sEnd[1] = 0; // this is ok because string allocates a small gap

	yy5lex_init ( &tParser.m_pScanner );
	// +2 covers the two terminator bytes demanded by yy_scan_buffer
	YY_BUFFER_STATE tLexerBuffer = yy5_scan_buffer ( const_cast<char *>( sQuery.first ), sQuery.second+2, tParser.m_pScanner );
	if ( !tLexerBuffer )
	{
		sError = "internal error: yy5_scan_buffer() failed";
		return ParseResult_e::PARSE_ERROR;
	}

	int iRes = yyparse ( &tParser );

	// always release lexer state, whether parsing succeeded or not
	yy5_delete_buffer ( tLexerBuffer, tParser.m_pScanner );
	yy5lex_destroy ( tParser.m_pScanner );

	return ( iRes || dStmt.IsEmpty() ) ? ParseResult_e::PARSE_ERROR : ParseResult_e::PARSE_OK;
}
| 3,492
|
C++
|
.cpp
| 96
| 34.447917
| 128
| 0.73539
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,928
|
columnargrouper.cpp
|
manticoresoftware_manticoresearch/src/columnargrouper.cpp
|
//
// Copyright (c) 2020-2024, Manticore Software LTD (https://manticoresearch.com)
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "columnargrouper.h"
#include "sphinxsort.h"
#include "grouper.h"
// Read the raw MVA blob for a row through a columnar iterator and convert
// each element (DWORD or int64_t) into a group key.
template <typename T>
static inline void FetchMVAValues ( const std::unique_ptr<columnar::Iterator_i> & pIterator, CSphVector<SphGroupKey_t> & dKeys, const CSphMatch & tMatch )
{
	const BYTE * pBlob = nullptr;
	int iBytes = pIterator->Get ( tMatch.m_tRowID, pBlob );
	auto pSrc = (const T*)pBlob;
	int iCount = int ( iBytes/sizeof(T) );

	dKeys.Resize(iCount);
	ARRAY_FOREACH ( i, dKeys )
		dKeys[i] = (SphGroupKey_t)pSrc[i];
}
// Advance the multi-attribute "odometer" dSet to the next combination of
// per-attribute key indexes. Attributes with no keys are skipped. Returns
// false once every combination has been produced.
bool NextSet ( CSphFixedVector<int> & dSet, const CSphFixedVector<CSphVector<int64_t>> & dAllKeys )
{
	ARRAY_FOREACH ( iDigit, dSet )
	{
		int iRadix = dAllKeys[iDigit].GetLength();
		if ( !iRadix )
			continue; // this attribute produced no keys; not a real digit

		if ( ++dSet[iDigit] < iRadix )
			return true; // no carry - new combination is ready

		dSet[iDigit] = 0; // wrap around and carry into the next digit
	}

	return false; // carried past the last digit - enumeration complete
}
/////////////////////////////////////////////////////////////////////
// Grouper over a plain columnar numeric attribute; the group key is the raw
// attribute value read through a columnar iterator.
class GrouperColumnarInt_c : public CSphGrouper
{
public:
	GrouperColumnarInt_c ( const CSphString & sName, ESphAttr eType );
	GrouperColumnarInt_c ( const GrouperColumnarInt_c & rhs );

	void GetLocator ( CSphAttrLocator & tOut ) const final {} // columnar attrs have no rowwise locator
	ESphAttr GetResultType () const final { return m_eAttrType; }
	SphGroupKey_t KeyFromMatch ( const CSphMatch & tMatch ) const final { return m_pIterator->Get ( tMatch.m_tRowID ); }
	SphGroupKey_t KeyFromValue ( SphAttr_t ) const final { assert(0); return SphGroupKey_t(); } // keys come from matches only
	void MultipleKeysFromMatch ( const CSphMatch & tMatch, CSphVector<SphGroupKey_t> & dKeys ) const override { assert(0); } // single-value grouper
	void SetColumnar ( const columnar::Columnar_i * pColumnar ) final;
	CSphGrouper * Clone() const final { return new GrouperColumnarInt_c(*this); }

private:
	CSphString m_sAttrName;
	ESphAttr m_eAttrType = SPH_ATTR_INTEGER;
	std::unique_ptr<columnar::Iterator_i> m_pIterator; // recreated via SetColumnar()
};

GrouperColumnarInt_c::GrouperColumnarInt_c ( const CSphString & sName, ESphAttr eType )
	: m_sAttrName ( sName )
	, m_eAttrType ( eType )
{}

// copy ctor deliberately does not copy the iterator: a clone obtains its own
// iterator through a later SetColumnar() call
GrouperColumnarInt_c::GrouperColumnarInt_c ( const GrouperColumnarInt_c & rhs )
	: m_sAttrName ( rhs.m_sAttrName )
	, m_eAttrType ( rhs.m_eAttrType )
{}

void GrouperColumnarInt_c::SetColumnar ( const columnar::Columnar_i * pColumnar )
{
	assert(pColumnar);
	std::string sError; // fixme! report errors
	m_pIterator = CreateColumnarIterator ( pColumnar, m_sAttrName.cstr(), sError );
}
//////////////////////////////////////////////////////////////////////////
// Grouper over a columnar string attribute. The group key is a hash of the
// string: either a hash precalculated by the columnar storage (only available
// with the default collation) or one computed on the fly via the HASH policy.
template <typename HASH>
class GrouperColumnarString_T : public CSphGrouper, public HASH
{
public:
	GrouperColumnarString_T ( const CSphString & sName, ESphCollation eCollation );
	GrouperColumnarString_T ( const GrouperColumnarString_T & rhs );

	void GetLocator ( CSphAttrLocator & tOut ) const final {}
	ESphAttr GetResultType () const final { return SPH_ATTR_BIGINT; }
	SphGroupKey_t KeyFromMatch ( const CSphMatch & tMatch ) const final;
	SphGroupKey_t KeyFromValue ( SphAttr_t ) const final { assert(0); return SphGroupKey_t(); }
	void MultipleKeysFromMatch ( const CSphMatch & tMatch, CSphVector<SphGroupKey_t> & dKeys ) const final { assert(0); }
	void SetColumnar ( const columnar::Columnar_i * pColumnar ) final;
	CSphGrouper * Clone() const final;

private:
	CSphString m_sAttrName;
	ESphCollation m_eCollation = SPH_COLLATION_DEFAULT;
	bool m_bHasHashes = false; // true when the iterator serves precalculated string hashes
	std::unique_ptr<columnar::Iterator_i> m_pIterator;
};

template <typename HASH>
GrouperColumnarString_T<HASH>::GrouperColumnarString_T ( const CSphString & sName, ESphCollation eCollation )
	: m_sAttrName ( sName )
	, m_eCollation ( eCollation )
{}

// iterator not copied; the clone re-spawns it via SetColumnar()
template <typename HASH>
GrouperColumnarString_T<HASH>::GrouperColumnarString_T ( const GrouperColumnarString_T & rhs )
	: m_sAttrName ( rhs.m_sAttrName )
	, m_eCollation ( rhs.m_eCollation )
{}

template <typename HASH>
SphGroupKey_t GrouperColumnarString_T<HASH>::KeyFromMatch ( const CSphMatch & tMatch ) const
{
	// fast path: storage-provided hash
	if ( m_bHasHashes )
		return m_pIterator->Get ( tMatch.m_tRowID );

	// slow path: fetch the string and hash it here; empty string maps to key 0
	const BYTE * pStr = nullptr;
	int iLen = m_pIterator->Get ( tMatch.m_tRowID, pStr );
	if ( !iLen )
		return 0;

	return HASH::Hash ( pStr, iLen );
}

template <typename HASH>
void GrouperColumnarString_T<HASH>::SetColumnar ( const columnar::Columnar_i * pColumnar )
{
	assert(pColumnar);
	columnar::IteratorHints_t tHints;
	columnar::IteratorCapabilities_t tCapabilities;
	// precalculated hashes are only usable with the default (binary) collation
	tHints.m_bNeedStringHashes = m_eCollation==SPH_COLLATION_DEFAULT;

	std::string sError; // fixme! report errors
	m_pIterator = CreateColumnarIterator ( pColumnar, m_sAttrName.cstr(), sError, tHints, &tCapabilities );
	m_bHasHashes = tCapabilities.m_bStringHashes;
}

template <typename HASH>
CSphGrouper * GrouperColumnarString_T<HASH>::Clone() const
{
	return new GrouperColumnarString_T<HASH>(*this);
}
//////////////////////////////////////////////////////////////////////////
// Grouper over a columnar MVA attribute: each match may contribute several
// group keys (one per MVA element), hence IsMultiValue()==true and the
// single-key entry points assert.
template <typename T>
class GrouperColumnarMVA_T : public CSphGrouper
{
public:
	GrouperColumnarMVA_T ( const CSphString & sName ) : m_sAttrName ( sName ) {}
	GrouperColumnarMVA_T ( const GrouperColumnarMVA_T & rhs ) : m_sAttrName ( rhs.m_sAttrName ) {} // iterator not copied; clone re-spawns it via SetColumnar()

	void GetLocator ( CSphAttrLocator & tOut ) const final {}
	ESphAttr GetResultType () const final { return SPH_ATTR_BIGINT; }
	SphGroupKey_t KeyFromMatch ( const CSphMatch & tMatch ) const final { assert(0); return SphGroupKey_t(); } // multi-value only
	void MultipleKeysFromMatch ( const CSphMatch & tMatch, CSphVector<SphGroupKey_t> & dKeys ) const final { FetchMVAValues<T> ( m_pIterator, dKeys, tMatch ); }
	SphGroupKey_t KeyFromValue ( SphAttr_t ) const final { assert(0); return SphGroupKey_t(); }
	void SetColumnar ( const columnar::Columnar_i * pColumnar ) final;
	CSphGrouper * Clone() const final { return new GrouperColumnarMVA_T(*this); }
	bool IsMultiValue() const final { return true; }

private:
	CSphString m_sAttrName;
	std::unique_ptr<columnar::Iterator_i> m_pIterator;
};

template <typename T>
void GrouperColumnarMVA_T<T>::SetColumnar ( const columnar::Columnar_i * pColumnar )
{
	assert(pColumnar);
	std::string sError; // fixme! report errors
	m_pIterator = CreateColumnarIterator ( pColumnar, m_sAttrName.cstr(),sError );
}
//////////////////////////////////////////////////////////////////////////
// Grouper over SEVERAL columnar attributes at once: the key is an FNV64 hash
// chained across all attribute values of a match. If any attribute is
// multi-valued, keys are the cartesian product of per-attribute key lists.
template <class HASH>
class GrouperColumnarMulti final : public CSphGrouper, public HASH
{
public:
	GrouperColumnarMulti ( const CSphVector<CSphColumnInfo> & dAttrs, ESphCollation eCollation );

	SphGroupKey_t KeyFromMatch ( const CSphMatch & tMatch ) const final;
	void SetColumnar ( const columnar::Columnar_i * pColumnar ) final;
	CSphGrouper * Clone() const final { return new GrouperColumnarMulti<HASH> ( m_dAttrs, m_eCollation ); }
	void MultipleKeysFromMatch ( const CSphMatch & tMatch, CSphVector<SphGroupKey_t> & dKeys ) const final;
	SphGroupKey_t KeyFromValue ( SphAttr_t ) const final { assert(0); return SphGroupKey_t(); }
	void GetLocator ( CSphAttrLocator & ) const final { assert(0); }
	ESphAttr GetResultType() const final { return SPH_ATTR_BIGINT; }
	bool IsMultiValue() const final;

private:
	CSphVector<CSphColumnInfo> m_dAttrs;
	ESphCollation m_eCollation = SPH_COLLATION_DEFAULT;
	CSphVector<std::unique_ptr<columnar::Iterator_i>> m_dIterators;
	CSphVector<bool> m_dHaveStringHashes; // per attr: iterator serves precalculated string hashes
	const columnar::Columnar_i * m_pColumnar = nullptr; // kept so SpawnIterators() can recreate iterators

	SphGroupKey_t FetchStringHash ( int iAttr, const CSphMatch & tMatch, SphGroupKey_t tPrevKey ) const;
	SphGroupKey_t FetchStringHash ( int iAttr, const CSphMatch & tMatch ) const;
	void SpawnIterators();
};
template <class HASH>
GrouperColumnarMulti<HASH>::GrouperColumnarMulti ( const CSphVector<CSphColumnInfo> & dAttrs, ESphCollation eCollation )
	: m_dAttrs ( dAttrs )
	, m_eCollation ( eCollation )
{
	// a single attribute is handled by the specialized single-attr groupers
	assert ( dAttrs.GetLength()>1 );
}

// Fold all attribute values of a match into one chained FNV64 group key.
template <class HASH>
SphGroupKey_t GrouperColumnarMulti<HASH>::KeyFromMatch ( const CSphMatch & tMatch ) const
{
	auto tKey = ( SphGroupKey_t ) SPH_FNV64_SEED;
	for ( int i=0; i<m_dAttrs.GetLength(); i++ )
	{
		auto & pIterator = m_dIterators[i];
		if ( m_dAttrs[i].m_eAttrType==SPH_ATTR_STRING || m_dAttrs[i].m_eAttrType==SPH_ATTR_STRINGPTR )
			tKey = FetchStringHash ( i, tMatch, tKey );
		else
			tKey = ( SphGroupKey_t ) sphFNV64 ( pIterator->Get ( tMatch.m_tRowID ), tKey );
	}
	return tKey;
}

template <class HASH>
void GrouperColumnarMulti<HASH>::SetColumnar ( const columnar::Columnar_i * pColumnar )
{
	CSphGrouper::SetColumnar ( pColumnar );
	m_pColumnar = pColumnar; // remembered for iterator re-creation
	SpawnIterators();
}
// Produce the full set of group keys for a match. Multi-value attributes can
// each contribute several values, so the result is the cartesian product of
// all per-attribute key lists, each combination folded into one FNV64 key.
template <class HASH>
void GrouperColumnarMulti<HASH>::MultipleKeysFromMatch ( const CSphMatch & tMatch, CSphVector<SphGroupKey_t> & dKeys ) const
{
	dKeys.Resize(0);

	// step 1: collect each attribute's key list
	CSphFixedVector<CSphVector<SphGroupKey_t>> dAllKeys { m_dAttrs.GetLength() };
	for ( int i=0; i<m_dAttrs.GetLength(); i++ )
	{
		auto & dCurKeys = dAllKeys[i];
		auto & pIterator = m_dIterators[i];
		switch ( m_dAttrs[i].m_eAttrType )
		{
		case SPH_ATTR_UINT32SET:
			// fixed: fetch into this attribute's own list (dCurKeys), not into the
			// output vector dKeys; targeting dKeys left raw MVA values mixed into
			// the result and kept dAllKeys[i] empty, so MVA attributes never took
			// part in the cartesian product below
			FetchMVAValues<DWORD> ( pIterator, dCurKeys, tMatch );
			break;

		case SPH_ATTR_INT64SET:
			FetchMVAValues<int64_t> ( pIterator, dCurKeys, tMatch );
			break;

		case SPH_ATTR_STRING:
		case SPH_ATTR_STRINGPTR:
		{
			// an empty string hashes to the seed and is treated as "no key"
			SphGroupKey_t tStringKey = FetchStringHash ( i, tMatch );
			if ( tStringKey!=(SphGroupKey_t)SPH_FNV64_SEED )
				dCurKeys.Add ( tStringKey );
		}
		break;

		default:
			dCurKeys.Add ( pIterator->Get ( tMatch.m_tRowID ) );
			break;
		}
	}

	// step 2: enumerate all combinations (odometer-style) and fold each one
	// into a single chained hash key
	CSphFixedVector<int> dIndexes { m_dAttrs.GetLength() };
	dIndexes.ZeroVec();

	do
	{
		auto tKey = ( SphGroupKey_t ) SPH_FNV64_SEED;
		ARRAY_FOREACH ( i, dAllKeys )
			if ( dAllKeys[i].GetLength() )
				tKey = (SphGroupKey_t)sphFNV64 ( dAllKeys[i][dIndexes[i]], tKey );

		dKeys.Add(tKey);
	}
	while ( NextSet ( dIndexes, dAllKeys ) );
}
template <class HASH>
bool GrouperColumnarMulti<HASH>::IsMultiValue() const
{
	// json and mva attributes may expand into several keys per match
	return m_dAttrs.any_of ( []( auto & tAttr ){ return tAttr.m_eAttrType==SPH_ATTR_JSON || tAttr.m_eAttrType==SPH_ATTR_UINT32SET || tAttr.m_eAttrType==SPH_ATTR_INT64SET; } );
}

// Hash one string attribute, chaining into tPrevKey.
template <class HASH>
SphGroupKey_t GrouperColumnarMulti<HASH>::FetchStringHash ( int iAttr, const CSphMatch & tMatch, SphGroupKey_t tPrevKey ) const
{
	auto & pIterator = m_dIterators[iAttr];
	if ( m_dHaveStringHashes[iAttr] )
		return sphFNV64 ( pIterator->Get ( tMatch.m_tRowID ), tPrevKey ); // fold the precalculated hash

	const BYTE * pStr = nullptr;
	int iLen = pIterator->Get ( tMatch.m_tRowID, pStr );
	if ( !iLen )
		return tPrevKey; // empty string contributes nothing

	return HASH::Hash ( pStr, iLen, tPrevKey );
}

// Hash one string attribute standalone (no chaining); a result equal to
// SPH_FNV64_SEED means "empty string".
template <class HASH>
SphGroupKey_t GrouperColumnarMulti<HASH>::FetchStringHash ( int iAttr, const CSphMatch & tMatch ) const
{
	auto & pIterator = m_dIterators[iAttr];
	if ( m_dHaveStringHashes[iAttr] )
		return pIterator->Get ( tMatch.m_tRowID );

	const BYTE * pStr = nullptr;
	int iLen = pIterator->Get ( tMatch.m_tRowID, pStr );
	if ( !iLen )
		return SPH_FNV64_SEED;

	return HASH::Hash ( pStr, iLen, SPH_FNV64_SEED );
}

// (Re)create one columnar iterator per attribute; for string attrs, request
// storage-precalculated hashes (only usable with the default collation).
template <class HASH>
void GrouperColumnarMulti<HASH>::SpawnIterators()
{
	m_dHaveStringHashes.Resize ( m_dAttrs.GetLength() );
	m_dHaveStringHashes.Fill(false);
	m_dIterators.Resize ( m_dAttrs.GetLength() );

	ARRAY_FOREACH ( i, m_dAttrs )
	{
		const auto & tAttr = m_dAttrs[i];
		assert ( tAttr.IsColumnar() || tAttr.IsColumnarExpr() );
		std::string sError; // fixme! report errors
		if ( tAttr.m_eAttrType==SPH_ATTR_STRING || tAttr.m_eAttrType==SPH_ATTR_STRINGPTR )
		{
			columnar::IteratorHints_t tHints;
			columnar::IteratorCapabilities_t tCapabilities;
			tHints.m_bNeedStringHashes = m_eCollation==SPH_COLLATION_DEFAULT;

			m_dIterators[i] = CreateColumnarIterator ( m_pColumnar, tAttr.m_sName.cstr(), sError, tHints, &tCapabilities );
			m_dHaveStringHashes[i] = tCapabilities.m_bStringHashes;
		}
		else
			m_dIterators[i] = CreateColumnarIterator ( m_pColumnar, tAttr.m_sName.cstr(), sError );
	}
}
//////////////////////////////////////////////////////////////////////////
// Factory: grouper over a columnar numeric attribute.
CSphGrouper * CreateGrouperColumnarInt ( const CSphString & sName, ESphAttr eType )
{
	return new GrouperColumnarInt_c ( sName, eType );
}
// Factory: grouper over a columnar string attribute; picks the hash policy
// matching the requested collation (default falls back to binary hashing).
CSphGrouper * CreateGrouperColumnarString ( const CSphString & sName, ESphCollation eCollation )
{
	switch ( eCollation )
	{
	case SPH_COLLATION_UTF8_GENERAL_CI:	return new GrouperColumnarString_T<Utf8CIHash_fn> ( sName, eCollation );
	case SPH_COLLATION_LIBC_CI:			return new GrouperColumnarString_T<LibcCIHash_fn> ( sName, eCollation );
	case SPH_COLLATION_LIBC_CS:			return new GrouperColumnarString_T<LibcCSHash_fn> ( sName, eCollation );
	default:							return new GrouperColumnarString_T<BinaryHash_fn> ( sName, eCollation );
	}
}
// Factory: grouper over multiple columnar attributes; hash policy chosen by
// collation, same scheme as the single-string factory above.
CSphGrouper * CreateGrouperColumnarMulti ( const CSphVector<CSphColumnInfo> & dAttrs, ESphCollation eCollation )
{
	switch ( eCollation )
	{
	case SPH_COLLATION_UTF8_GENERAL_CI:	return new GrouperColumnarMulti<Utf8CIHash_fn> ( dAttrs, eCollation );
	case SPH_COLLATION_LIBC_CI:			return new GrouperColumnarMulti<LibcCIHash_fn> ( dAttrs, eCollation );
	case SPH_COLLATION_LIBC_CS:			return new GrouperColumnarMulti<LibcCSHash_fn> ( dAttrs, eCollation );
	default:							return new GrouperColumnarMulti<BinaryHash_fn> ( dAttrs, eCollation );
	}
}
// Factory: grouper over a columnar MVA attribute. 32-bit sets get the DWORD
// specialization; everything else is treated as 64-bit.
CSphGrouper * CreateGrouperColumnarMVA ( const CSphString & sName, ESphAttr eType )
{
	bool bNarrow = ( eType==SPH_ATTR_UINT32SET || eType==SPH_ATTR_UINT32SET_PTR );
	if ( bNarrow )
		return new GrouperColumnarMVA_T<DWORD>(sName);

	return new GrouperColumnarMVA_T<int64_t>(sName);
}
//////////////////////////////////////////////////////////////////////////
// Common base for distinct-value fetchers over columnar attributes: stores
// the attribute name and the per-segment iterator. Blob pool / locator hooks
// are no-ops because columnar storage does not use them.
class DistinctFetcherColumnar_c : public DistinctFetcher_i
{
public:
	DistinctFetcherColumnar_c ( const CSphString & sName ) : m_sName(sName) {}

	void SetBlobPool ( const BYTE * pBlobPool ) override {}
	void FixupLocators ( const ISphSchema * pOldSchema, const ISphSchema * pNewSchema ) override {}

protected:
	CSphString m_sName;
	std::unique_ptr<columnar::Iterator_i> m_pIterator;
};
// Intermediate base: fetchers that produce exactly ONE key per match; the
// multi-key entry point is therefore forbidden.
class DistinctFetcherColumnarPlain_c : public DistinctFetcherColumnar_c
{
	using DistinctFetcherColumnar_c::DistinctFetcherColumnar_c;

public:
	void GetKeys ( const CSphMatch & tMatch, CSphVector<SphAttr_t> & dKeys ) const override { assert ( 0 && " Requesting multiple keys from plain distinct fetcher" ); }
	bool IsMultiValue() const override { return false; }
};
// Intermediate base: fetchers that may produce SEVERAL keys per match; the
// single-key entry point is therefore forbidden.
class DistinctFetcherColumnarMulti_c : public DistinctFetcherColumnar_c
{
	using DistinctFetcherColumnar_c::DistinctFetcherColumnar_c;

public:
	SphAttr_t GetKey ( const CSphMatch & tMatch ) const override { assert ( 0 && " Requesting single keys from multi distinct fetcher" ); return 0; }
	bool IsMultiValue() const override { return true; }
};
// Distinct fetcher for columnar numeric attributes: the key is the raw value.
class DistinctFetcherColumnarInt_c : public DistinctFetcherColumnarPlain_c
{
	using DistinctFetcherColumnarPlain_c::DistinctFetcherColumnarPlain_c;

public:
	SphAttr_t GetKey ( const CSphMatch & tMatch ) const override { return m_pIterator->Get ( tMatch.m_tRowID ); }
	void SetColumnar ( const columnar::Columnar_i * pColumnar ) override;
	DistinctFetcher_i * Clone() const override { return new DistinctFetcherColumnarInt_c(m_sName); } // clone gets its iterator via SetColumnar()
};

void DistinctFetcherColumnarInt_c::SetColumnar ( const columnar::Columnar_i * pColumnar )
{
	assert(pColumnar);
	std::string sError; // fixme! report errors
	m_pIterator = CreateColumnarIterator ( pColumnar, m_sName.cstr(), sError );
}
//////////////////////////////////////////////////////////////////////////
// Distinct fetcher for columnar MVA attributes: every MVA element becomes a
// separate distinct key.
template <typename T>
class DistinctFetcherColumnarMva_T : public DistinctFetcherColumnarMulti_c
{
	using DistinctFetcherColumnarMulti_c::DistinctFetcherColumnarMulti_c;

public:
	void GetKeys ( const CSphMatch & tMatch, CSphVector<SphAttr_t> & dKeys ) const override;
	void SetColumnar ( const columnar::Columnar_i * pColumnar ) override;
	DistinctFetcher_i * Clone() const override { return new DistinctFetcherColumnarMva_T(m_sName); }
};

// Decode the raw MVA blob for a row into individual keys.
template <typename T>
void DistinctFetcherColumnarMva_T<T>::GetKeys ( const CSphMatch & tMatch, CSphVector<SphAttr_t> & dKeys ) const
{
	const BYTE * pMVA = nullptr;
	int iLen = m_pIterator->Get ( tMatch.m_tRowID, pMVA );
	int iNumValues = iLen/sizeof(T);
	auto pValues = (const T*)pMVA;

	dKeys.Resize(iNumValues);
	for ( int i = 0; i < iNumValues; i++ )
		dKeys[i] = (SphGroupKey_t)pValues[i];
}

template <typename T>
void DistinctFetcherColumnarMva_T<T>::SetColumnar ( const columnar::Columnar_i * pColumnar )
{
	assert(pColumnar);
	std::string sError; // fixme! report errors
	m_pIterator = CreateColumnarIterator ( pColumnar, m_sName.cstr(), sError );
}
//////////////////////////////////////////////////////////////////////////
// Distinct fetcher for columnar string attributes: the key is a string hash,
// taken from the storage when available, otherwise computed via HASH.
template <typename HASH>
class DistinctFetcherColumnarString_T : public DistinctFetcherColumnarPlain_c, public HASH
{
	using DistinctFetcherColumnarPlain_c::DistinctFetcherColumnarPlain_c;

public:
	SphAttr_t GetKey ( const CSphMatch & tMatch ) const override;
	void SetColumnar ( const columnar::Columnar_i * pColumnar ) override;
	DistinctFetcher_i * Clone() const override { return new DistinctFetcherColumnarString_T<HASH>(m_sName); }

private:
	bool m_bHasHashes = false; // iterator serves precalculated hashes
};

template <typename HASH>
SphAttr_t DistinctFetcherColumnarString_T<HASH>::GetKey ( const CSphMatch & tMatch ) const
{
	// fast path: precalculated hash straight from storage
	if ( m_bHasHashes )
		return m_pIterator->Get ( tMatch.m_tRowID );

	// slow path: fetch and hash here; empty string maps to key 0
	const BYTE * pStr = nullptr;
	int iLen = m_pIterator->Get ( tMatch.m_tRowID, pStr );
	if ( !iLen )
		return 0;

	return HASH::Hash ( pStr, iLen );
}

template <typename HASH>
void DistinctFetcherColumnarString_T<HASH>::SetColumnar ( const columnar::Columnar_i * pColumnar )
{
	assert(pColumnar);
	columnar::IteratorHints_t tHints;
	columnar::IteratorCapabilities_t tCapabilities;
	tHints.m_bNeedStringHashes = true; // always ask; fall back to HASH if unavailable

	std::string sError; // fixme! report errors
	m_pIterator = CreateColumnarIterator ( pColumnar, m_sName.cstr(), sError, tHints, &tCapabilities );
	m_bHasHashes = tCapabilities.m_bStringHashes;
}
//////////////////////////////////////////////////////////////////////////
// Factory: distinct fetcher matching the attribute type; strings also pick a
// hash policy by collation, everything non-string/non-MVA falls back to the
// plain numeric fetcher.
DistinctFetcher_i * CreateColumnarDistinctFetcher ( const CSphString & sName, ESphAttr eType, ESphCollation eCollation )
{
	switch ( eType )
	{
	case SPH_ATTR_STRING:
	{
		switch ( eCollation )
		{
		case SPH_COLLATION_UTF8_GENERAL_CI:	return new DistinctFetcherColumnarString_T<Utf8CIHash_fn>(sName);
		case SPH_COLLATION_LIBC_CI:			return new DistinctFetcherColumnarString_T<LibcCIHash_fn>(sName);
		case SPH_COLLATION_LIBC_CS:			return new DistinctFetcherColumnarString_T<LibcCSHash_fn>(sName);
		default:							return new DistinctFetcherColumnarString_T<BinaryHash_fn>(sName);
		}
	}

	case SPH_ATTR_UINT32SET:	return new DistinctFetcherColumnarMva_T<DWORD>(sName);
	case SPH_ATTR_INT64SET:		return new DistinctFetcherColumnarMva_T<int64_t>(sName);
	default:					return new DistinctFetcherColumnarInt_c(sName);
	}
}
| 19,015
|
C++
|
.cpp
| 451
| 39.997783
| 172
| 0.726485
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,929
|
exprdatetime.cpp
|
manticoresoftware_manticoresearch/src/exprdatetime.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "exprdatetime.h"
#include "exprtraits.h"
static bool g_bUseUTC = false;
// Render tTime as "HH:MM:SS" (timezone handling lives in ConvertTime).
// Returns the string length; *ppStr receives a leaked buffer that the
// CALLER now owns and must free.
static FORCE_INLINE int FormatTime ( time_t tTime, const BYTE ** ppStr )
{
	cctz::civil_second tLocTime = ConvertTime(tTime);
	CSphString sVal;
	sVal.SetSprintf ( "%02d:%02d:%02d", tLocTime.hour(), tLocTime.minute(), tLocTime.second() );
	int iLength = sVal.Length();
	*ppStr = (const BYTE*) sVal.Leak(); // ownership transferred to caller
	return iLength;
}
// Render tTime as "YYYY-MM-DD" (timezone handling lives in ConvertTime).
// Returns the string length; *ppStr receives a leaked buffer the CALLER owns.
static FORCE_INLINE int FormatDate ( time_t tTime, const BYTE ** ppStr )
{
	cctz::civil_second tLocTime = ConvertTime(tTime);
	CSphString sVal;
	sVal.SetSprintf ( "%04d-%02d-%02d", (int)tLocTime.year(), tLocTime.month(), tLocTime.day() );
	int iLength = sVal.Length();
	*ppStr = (const BYTE*) sVal.Leak(); // ownership transferred to caller
	return iLength;
}
// Render tTime as an English weekday name. GetWeekDay(..., true) returns a
// 1-based index starting at Sunday, matching dWeekDays below.
// Returns the string length; *ppStr receives a leaked buffer the CALLER owns.
static FORCE_INLINE int FormatDayName ( time_t tTime, const BYTE ** ppStr )
{
	static const char * dWeekDays[] =
	{
		"Sunday",
		"Monday",
		"Tuesday",
		"Wednesday",
		"Thursday",
		"Friday",
		"Saturday"
	};

	CSphString sVal = dWeekDays[GetWeekDay ( ConvertTime(tTime), true ) - 1];
	int iLength = sVal.Length();
	*ppStr = (const BYTE*) sVal.Leak(); // ownership transferred to caller
	return iLength;
}
// Render tTime as an English month name; civil_second::month() is 1-based,
// hence the -1 when indexing dMonths.
// Returns the string length; *ppStr receives a leaked buffer the CALLER owns.
static FORCE_INLINE int FormatMonthName ( time_t tTime, const BYTE ** ppStr )
{
	static const char * dMonths[] =
	{
		"January",
		"February",
		"March",
		"April",
		"May",
		"June",
		"July",
		"August",
		"September",
		"October",
		"November",
		"December"
	};

	CSphString sVal = dMonths[ConvertTime(tTime).month() - 1];
	int iLength = sVal.Length();
	*ppStr = (const BYTE*) sVal.Leak(); // ownership transferred to caller
	return iLength;
}
/////////////////////////////////////////////////////////////////////
// Shared helper: evaluate an expression as a unix timestamp and convert it
// with either UTC or non-UTC groupby time conversion (selected at compile
// time by the UTC template flag).
template<bool UTC>
class TimeTraits_T
{
protected:
	FORCE_INLINE cctz::civil_second GetConvertedTime ( ISphExpr * pExpr, const CSphMatch & tMatch ) const { return ConvertGroupbyTime<UTC> ( (time_t)pExpr->Int64Eval(tMatch) ); }
};
// Base for unary date/time expressions: Eval/IntEval funnel into Int64Eval
// (implemented by the concrete subclass); the converted time is derived from
// the single wrapped argument m_pFirst.
template<bool UTC>
class Expr_TimeTraits_T : public Expr_Unary_c, public TimeTraits_T<UTC>
{
	using Expr_Unary_c::Expr_Unary_c;

public:
	float Eval ( const CSphMatch & tMatch ) const final { return (float)Int64Eval(tMatch); }
	int IntEval ( const CSphMatch & tMatch ) const final { return (int)Int64Eval(tMatch); }

protected:
	FORCE_INLINE cctz::civil_second GetConvertedTime ( const CSphMatch & tMatch ) const { return TimeTraits_T<UTC>::GetConvertedTime ( m_pFirst, tMatch ); }
};
template<bool UTC>
class Expr_TimeTraitsBinary_T : public Expr_Binary_c, public TimeTraits_T<UTC>
{
using Expr_Binary_c::Expr_Binary_c;
public:
float Eval ( const CSphMatch & tMatch ) const final { return (float)Int64Eval(tMatch); }
int IntEval ( const CSphMatch & tMatch ) const final { return (int)Int64Eval(tMatch); }
protected:
FORCE_INLINE cctz::civil_second GetConvertedTime ( const CSphMatch & tMatch ) const { return TimeTraits_T<UTC>::GetConvertedTime ( m_pFirst, tMatch ); }
};
// NOW() — a constant "current time" snapshot (m_iNow) captured when the expression
// is created, so every match evaluated by one query sees the same timestamp.
class Expr_Now_c : public Expr_NoLocator_c
{
public:
	Expr_Now_c ( int iNow ) : m_iNow ( iNow ) {}

	int IntEval ( const CSphMatch & ) const final { return m_iNow; }
	float Eval ( const CSphMatch & tMatch ) const final { return (float)IntEval ( tMatch ); }
	int64_t Int64Eval ( const CSphMatch & tMatch ) const final { return (int64_t)IntEval ( tMatch ); }

	// hashable: the snapshot value itself participates in the hash
	uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final
	{
		EXPR_CLASS_NAME("Expr_Now_c");
		CALC_POD_HASH(m_iNow);
		return CALC_DEP_HASHES();
	}

	ISphExpr * Clone () const final { return new Expr_Now_c ( *this ); }

private:
	int m_iNow {0};

	// copy ctor is private; cloning goes through Clone()
	Expr_Now_c ( const Expr_Now_c & rhs) : m_iNow (rhs.m_iNow) {}
};
// CURTIME()/UTC_TIME()-style expression evaluated against the wall clock at match
// time. UTC selects UTC vs local conversion; DATE selects the string form:
// "YYYY-MM-DD hh:mm:ss" when true, "hh:mm:ss" when false.
template<bool UTC, bool DATE>
class Expr_CurTime_T : public ISphExpr
{
public:
	int IntEval ( const CSphMatch & ) const final
	{
		time_t tTime = time(nullptr);
		if constexpr ( UTC )
			return tTime;

		// local-time flavor: repack the local civil time as if it were UTC
		return PackLocalTimeAsUTC(tTime);
	}

	int StringEval ( const CSphMatch &, const BYTE ** ppStr ) const override
	{
		cctz::civil_second tLocTime;
		time_t tTime = time(nullptr);
		if constexpr ( UTC )
			tLocTime = ConvertTimeUTC(tTime);
		else
			tLocTime = ConvertTime(tTime);

		CSphString sVal;
		if constexpr ( DATE )
			sVal.SetSprintf ( "%04d-%02d-%02d %02d:%02d:%02d", (int)tLocTime.year(), tLocTime.month(), tLocTime.day(), tLocTime.hour(), tLocTime.minute(), tLocTime.second() );
		else
			sVal.SetSprintf ( "%02d:%02d:%02d", tLocTime.hour(), tLocTime.minute(), tLocTime.second() );

		// caller takes ownership of the leaked buffer
		int iLength = sVal.Length();
		*ppStr = (const BYTE*) sVal.Leak();
		return iLength;
	}

	float Eval ( const CSphMatch & tMatch ) const final { return (float)IntEval ( tMatch ); }
	int64_t Int64Eval ( const CSphMatch & tMatch ) const final { return (int64_t)IntEval ( tMatch ); }
	bool IsDataPtrAttr () const final { return true; }
	void FixupLocator ( const ISphSchema * pOldSchema, const ISphSchema * pNewSchema ) final {}
	ISphExpr * Clone() const override { return new Expr_CurTime_T; }

	// not hashable: the result depends on the wall clock
	uint64_t GetHash ( const ISphSchema &, uint64_t, bool & bDisable ) final
	{
		bDisable = true;
		return 0;
	}
};
// CURDATE() — local-time variant of Expr_CurTime_T whose string form is just the
// "YYYY-MM-DD" date (via FormatDate); integer form is inherited.
class Expr_CurDate_c : public Expr_CurTime_T<false,false>
{
public:
	int StringEval ( const CSphMatch &, const BYTE ** ppStr ) const final { return FormatDate ( time(nullptr), ppStr ); }
	ISphExpr * Clone() const final { return new Expr_CurDate_c; }
};
// TIME(expr) — integer form passes the argument timestamp through unchanged;
// string form renders it as "hh:mm:ss" in local time (FormatTime).
class Expr_Time_c : public Expr_TimeTraits_T<false>
{
	using BASE = Expr_TimeTraits_T<false>;
	using BASE::BASE;

public:
	Expr_Time_c ( ISphExpr * pExpr ) : BASE ( "Expr_Time_c", pExpr ) {}
	int64_t Int64Eval ( const CSphMatch & tMatch ) const final { return m_pFirst->IntEval(tMatch); }
	int StringEval ( const CSphMatch & tMatch, const BYTE ** ppStr ) const override { return FormatTime ( m_pFirst->IntEval(tMatch), ppStr ); }
	bool IsDataPtrAttr () const final { return true; }
	ISphExpr * Clone() const override { return new Expr_Time_c(*this); }
};

// DATE(expr) — same pass-through integer form; string form renders "YYYY-MM-DD"
// in local time (FormatDate).
class Expr_Date_c : public Expr_TimeTraits_T<false>
{
	using BASE = Expr_TimeTraits_T<false>;
	using BASE::BASE;

public:
	Expr_Date_c ( ISphExpr * pExpr ) : BASE ( "Expr_Date_c", pExpr ) {}
	int64_t Int64Eval ( const CSphMatch & tMatch ) const final { return m_pFirst->IntEval(tMatch); }
	int StringEval ( const CSphMatch & tMatch, const BYTE ** ppStr ) const override { return FormatDate ( m_pFirst->IntEval(tMatch), ppStr ); }
	bool IsDataPtrAttr () const final { return true; }
	ISphExpr * Clone() const override { return new Expr_Date_c(*this); }
};
// Base for string-only unary expressions: numeric evaluation is a programming
// error (asserts in debug, returns 0 in release); results are caller-owned blobs.
class Expr_StringUnary_c : public Expr_Unary_c
{
	using Expr_Unary_c::Expr_Unary_c;

public:
	float Eval ( const CSphMatch & ) const final { assert ( 0 && "one just does not simply evaluate a string as float" ); return 0; }
	int IntEval ( const CSphMatch & ) const final { assert ( 0 && "one just does not simply evaluate a string as int" ); return 0; }
	int64_t Int64Eval ( const CSphMatch & ) const final { assert ( 0 && "one just does not simply evaluate a string as bigint" ); return 0; }
	bool IsDataPtrAttr () const final { return true; }
};

// DAYNAME(expr) — English weekday name of the argument timestamp.
class Expr_DayName_c : public Expr_StringUnary_c
{
	using Expr_StringUnary_c::Expr_StringUnary_c;

public:
	Expr_DayName_c ( ISphExpr * pExpr ) : Expr_StringUnary_c ( "Expr_DayName_c", pExpr ) {}
	ISphExpr * Clone() const override { return new Expr_DayName_c(*this); }
	int StringEval ( const CSphMatch & tMatch, const BYTE ** ppStr ) const override { return FormatDayName ( m_pFirst->IntEval(tMatch), ppStr ); }
};

// MONTHNAME(expr) — English month name of the argument timestamp.
class Expr_MonthName_c : public Expr_StringUnary_c
{
	using Expr_StringUnary_c::Expr_StringUnary_c;

public:
	Expr_MonthName_c ( ISphExpr * pExpr ) : Expr_StringUnary_c ( "Expr_MonthName_c", pExpr ) {}
	ISphExpr * Clone() const override { return new Expr_MonthName_c(*this); }
	int StringEval ( const CSphMatch & tMatch, const BYTE ** ppStr ) const override { return FormatMonthName ( m_pFirst->IntEval(tMatch), ppStr ); }
};
// TIMEDIFF(a, b) — signed difference in seconds between two timestamps.
// String form renders the difference as "[-]hh:mm:ss".
class Expr_TimeDiff_c : public Expr_Binary_c
{
public:
	Expr_TimeDiff_c ( ISphExpr * pFirst, ISphExpr * pSecond )
		: Expr_Binary_c ( "Expr_TimeDiff_c", pFirst, pSecond )
	{}

	int IntEval ( const CSphMatch & tMatch ) const final
	{
		assert ( m_pFirst && m_pSecond );
		return m_pFirst->IntEval ( tMatch )-m_pSecond->IntEval ( tMatch );
	}

	int StringEval ( const CSphMatch & tMatch, const BYTE ** ppStr ) const final
	{
		int iVal = IntEval ( tMatch );
		CSphString sVal;
		// format the absolute value; prepend '-' for negative differences
		int t = iVal<0 ? -iVal : iVal;
		sVal.SetSprintf ( "%s%02d:%02d:%02d", iVal<0 ? "-" : "", t/60/60, (t/60)%60, t%60 );
		int iLength = sVal.Length();
		*ppStr = (const BYTE*) sVal.Leak();
		return iLength;
	}

	float Eval ( const CSphMatch & tMatch ) const final { return (float)IntEval ( tMatch ); }
	int64_t Int64Eval ( const CSphMatch & tMatch ) const final { return (int64_t)IntEval ( tMatch ); }
	bool IsDataPtrAttr() const final { return true; }
	ISphExpr * Clone() const final { return new Expr_TimeDiff_c ( *this ); }
};
// DATEDIFF(a, b) — difference in whole civil days (local time) between two
// timestamps; time-of-day is discarded by truncating both to civil_day.
class Expr_DateDiff_c : public Expr_Binary_c
{
public:
	Expr_DateDiff_c ( ISphExpr * pFirst, ISphExpr * pSecond )
		: Expr_Binary_c ( "Expr_DateDiff_c", pFirst, pSecond )
	{}

	int IntEval ( const CSphMatch & tMatch ) const final
	{
		assert ( m_pFirst && m_pSecond );
		cctz::civil_day tDay1 = cctz::civil_day ( ConvertTime ( m_pFirst->IntEval(tMatch) ) );
		cctz::civil_day tDay2 = cctz::civil_day ( ConvertTime ( m_pSecond->IntEval(tMatch) ) );
		return tDay1-tDay2;
	}

	float Eval ( const CSphMatch & tMatch ) const final { return (float)IntEval ( tMatch ); }
	int64_t Int64Eval ( const CSphMatch & tMatch ) const final { return (int64_t)IntEval ( tMatch ); }
	ISphExpr * Clone() const final { return new Expr_DateDiff_c ( *this ); }
};
// DATE_FORMAT(expr, fmt) — formats the argument timestamp with a format string.
// The format argument is evaluated ONCE at construction against an empty match,
// i.e. it is assumed to be a constant expression.
class ExprDateFormat_c : public ISphStringExpr
{
public:
	ExprDateFormat_c ( ISphExpr * pArg, ISphExpr * pFmt )
		: m_pArg ( pArg )
	{
		assert( pArg );
		SafeAddRef( pArg );

		// snapshot the (constant) format string and release its buffer
		CSphMatch tTmp;
		const BYTE * sVal = nullptr;
		int iLen = pFmt->StringEval ( tTmp, &sVal );
		m_sFmt = CSphString ( (const char*)sVal, iLen );
		FreeDataPtr ( pFmt, sVal );
	}

	int StringEval ( const CSphMatch & tMatch, const BYTE ** ppStr ) const final
	{
		int64_t iTime = m_pArg->Int64Eval ( tMatch );

		// FIXME: modify this to use static buffer (performance issue)
		CSphString sRes = FormatTime ( time_t(iTime), m_sFmt.cstr() );
		int iLen = sRes.Length();
		*ppStr = (const BYTE *)sRes.Leak();
		return iLen;
	}

	// forward locator fixups and commands to the timestamp argument only
	// (the format string is already materialized)
	void FixupLocator ( const ISphSchema * pOldSchema, const ISphSchema * pNewSchema ) override
	{
		if ( m_pArg )
			m_pArg->FixupLocator ( pOldSchema, pNewSchema );
	}

	void Command ( ESphExprCommand eCmd, void * pArg ) override
	{
		if ( m_pArg )
			m_pArg->Command ( eCmd, pArg );
	}

	bool IsDataPtrAttr() const final
	{
		return true;
	}

	bool IsConst () const final { return false; }

	ISphExpr * Clone () const final
	{
		return new ExprDateFormat_c ( *this );
	}

	uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final
	{
		EXPR_CLASS_NAME("ExprDateFormat_c");
		CALC_CHILD_HASH(m_pArg);
		return CALC_DEP_HASHES();
	}

private:
	ExprDateFormat_c ( const ExprDateFormat_c & rhs )
		: m_pArg ( SafeClone ( rhs.m_pArg ) )
		, m_sFmt ( rhs.m_sFmt )
	{}

	CSphRefcountedPtr<ISphExpr> m_pArg;
	CSphString m_sFmt;
};
// DAY(expr) — day-of-month of the argument timestamp.
template<bool UTC>
class Expr_Day_T : public Expr_TimeTraits_T<UTC>
{
	using BASE=Expr_TimeTraits_T<UTC>;

public:
	Expr_Day_T ( ISphExpr * pExpr ) : BASE ( "Expr_Day_T", pExpr ) {}
	int64_t Int64Eval ( const CSphMatch & tMatch ) const final { return BASE::GetConvertedTime(tMatch).day(); }
	ISphExpr * Clone() const final { return new Expr_Day_T(*this); }
};

// WEEK(expr[, mode]) — week number of the argument timestamp. If the mode
// argument is absent or constant, it is precomputed at construction time
// (default mode flags are 3); otherwise it is re-evaluated per match.
template<bool UTC>
class Expr_Week_T : public Expr_TimeTraitsBinary_T<UTC>
{
	using BASE=Expr_TimeTraitsBinary_T<UTC>;

public:
	Expr_Week_T ( ISphExpr * pFirst, ISphExpr * pSecond ) : BASE ( "Expr_Week_T", pFirst, pSecond )
	{
		if ( !pSecond || pSecond->IsConst() )
		{
			m_bConst = true;
			if ( pSecond )
			{
				CSphMatch tMatch;
				m_uFlags = pSecond->IntEval(tMatch);
			}
		}
	}

	int64_t Int64Eval ( const CSphMatch & tMatch ) const final
	{
		if ( m_bConst )
			return CalcWeekNumber ( BASE::GetConvertedTime(tMatch), m_uFlags );

		return CalcWeekNumber ( BASE::GetConvertedTime(tMatch), BASE::m_pSecond->IntEval(tMatch) );
	}

	ISphExpr * Clone() const final { return new Expr_Week_T(*this); }

private:
	bool m_bConst = false;
	uint32_t m_uFlags = 3;
};
// The following family of tiny templates extracts one calendar component from the
// argument timestamp; the UTC parameter selects UTC vs local-time conversion.

// MONTH(expr) — month number (1..12).
template<bool UTC>
class Expr_Month_T : public Expr_TimeTraits_T<UTC>
{
	using BASE=Expr_TimeTraits_T<UTC>;

public:
	Expr_Month_T ( ISphExpr * pExpr ) : BASE ( "Expr_Month_T", pExpr ) {}
	int64_t Int64Eval ( const CSphMatch & tMatch ) const final { return BASE::GetConvertedTime(tMatch).month(); }
	ISphExpr * Clone() const final { return new Expr_Month_T(*this); }
};

// YEAR(expr).
template<bool UTC>
class Expr_Year_T : public Expr_TimeTraits_T<UTC>
{
	using BASE=Expr_TimeTraits_T<UTC>;

public:
	Expr_Year_T ( ISphExpr * pExpr ) : BASE ( "Expr_Year_T", pExpr ) {}
	int64_t Int64Eval ( const CSphMatch & tMatch ) const final { return BASE::GetConvertedTime(tMatch).year(); }
	ISphExpr * Clone() const final { return new Expr_Year_T(*this); }
};

// YEARMONTH(expr) — combined year+month key (see CalcYearMonth).
template<bool UTC>
class Expr_YearMonth_T : public Expr_TimeTraits_T<UTC>
{
	using BASE=Expr_TimeTraits_T<UTC>;

public:
	Expr_YearMonth_T ( ISphExpr * pExpr ) : BASE ( "Expr_YearMonth_T", pExpr ) {}
	int64_t Int64Eval ( const CSphMatch & tMatch ) const final { return CalcYearMonth ( BASE::GetConvertedTime(tMatch) ); }
	ISphExpr * Clone() const final { return new Expr_YearMonth_T(*this); }
};

// YEARMONTHDAY(expr) — combined year+month+day key (see CalcYearMonthDay).
template<bool UTC>
class Expr_YearMonthDay_T : public Expr_TimeTraits_T<UTC>
{
	using BASE=Expr_TimeTraits_T<UTC>;

public:
	Expr_YearMonthDay_T ( ISphExpr * pExpr ) : BASE ( "Expr_YearMonthDay_T", pExpr ) {}
	int64_t Int64Eval ( const CSphMatch & tMatch ) const final { return CalcYearMonthDay ( BASE::GetConvertedTime(tMatch) ); }
	ISphExpr * Clone() const final { return new Expr_YearMonthDay_T(*this); }
};

// YEARWEEK(expr) — combined year+week key (see CalcYearWeek).
template<bool UTC>
class Expr_YearWeek_T : public Expr_TimeTraits_T<UTC>
{
	using BASE=Expr_TimeTraits_T<UTC>;

public:
	Expr_YearWeek_T ( ISphExpr * pExpr ) : BASE ( "Expr_YearWeek_T", pExpr ) {}
	int64_t Int64Eval ( const CSphMatch & tMatch ) const final { return CalcYearWeek ( BASE::GetConvertedTime(tMatch) ); }
	ISphExpr * Clone() const final { return new Expr_YearWeek_T(*this); }
};

// HOUR(expr).
template<bool UTC>
class Expr_Hour_T : public Expr_TimeTraits_T<UTC>
{
	using BASE=Expr_TimeTraits_T<UTC>;

public:
	Expr_Hour_T ( ISphExpr * pExpr ) : BASE ( "Expr_Hour_T", pExpr ) {}
	int64_t Int64Eval ( const CSphMatch & tMatch ) const final { return BASE::GetConvertedTime(tMatch).hour(); }
	ISphExpr * Clone() const final { return new Expr_Hour_T(*this); }
};

// MINUTE(expr).
template<bool UTC>
class Expr_Minute_T : public Expr_TimeTraits_T<UTC>
{
	using BASE=Expr_TimeTraits_T<UTC>;

public:
	Expr_Minute_T ( ISphExpr * pExpr ) : BASE ( "Expr_Minute_T", pExpr ) {}
	int64_t Int64Eval ( const CSphMatch & tMatch ) const final { return BASE::GetConvertedTime(tMatch).minute(); }
	ISphExpr * Clone() const final { return new Expr_Minute_T(*this); }
};

// SECOND(expr).
template<bool UTC>
class Expr_Second_T : public Expr_TimeTraits_T<UTC>
{
	using BASE=Expr_TimeTraits_T<UTC>;

public:
	Expr_Second_T ( ISphExpr * pExpr ) : BASE ( "Expr_Second_T", pExpr ) {}
	int64_t Int64Eval ( const CSphMatch & tMatch ) const final { return BASE::GetConvertedTime(tMatch).second(); }
	ISphExpr * Clone() const final { return new Expr_Second_T(*this); }
};

// DAYOFWEEK(expr) — see GetWeekDay.
template<bool UTC>
class Expr_DayOfWeek_T : public Expr_TimeTraits_T<UTC>
{
	using BASE=Expr_TimeTraits_T<UTC>;

public:
	Expr_DayOfWeek_T ( ISphExpr * pExpr ) : BASE ( "Expr_DayOfWeek_T", pExpr ) {}
	int64_t Int64Eval ( const CSphMatch & tMatch ) const final { return GetWeekDay ( BASE::GetConvertedTime(tMatch), true ); }
	ISphExpr * Clone() const final { return new Expr_DayOfWeek_T(*this); }
};

// DAYOFYEAR(expr) — see GetYearDay.
template<bool UTC>
class Expr_DayOfYear_T : public Expr_TimeTraits_T<UTC>
{
	using BASE=Expr_TimeTraits_T<UTC>;

public:
	Expr_DayOfYear_T ( ISphExpr * pExpr ) : BASE ( "Expr_DayOfYear_T", pExpr ) {}
	int64_t Int64Eval ( const CSphMatch & tMatch ) const final { return GetYearDay ( BASE::GetConvertedTime(tMatch) ); }
	ISphExpr * Clone() const final { return new Expr_DayOfYear_T(*this); }
};

// QUARTER(expr) — see GetQuarter.
template<bool UTC>
class Expr_Quarter_T : public Expr_TimeTraits_T<UTC>
{
	using BASE=Expr_TimeTraits_T<UTC>;

public:
	Expr_Quarter_T ( ISphExpr * pExpr ) : BASE ( "Expr_Quarter_T", pExpr ) {}
	int64_t Int64Eval ( const CSphMatch & tMatch ) const final { return GetQuarter ( BASE::GetConvertedTime(tMatch) ); }
	ISphExpr * Clone() const final { return new Expr_Quarter_T(*this); }
};
// DATE_ADD()/DATE_SUB() — add (or subtract, when bAdd is false) an interval to a
// timestamp, expressed in the given time unit. A constant interval argument is
// precomputed once at construction.
class Expr_DateAdd_c : public Expr_Binary_c
{
public:
	Expr_DateAdd_c ( ISphExpr * pFirst, ISphExpr * pSecond, TimeUnit_e eUnit, bool bAdd );

	int IntEval ( const CSphMatch & tMatch ) const final;
	float Eval ( const CSphMatch & tMatch ) const final { return (float)IntEval ( tMatch ); }
	int64_t Int64Eval ( const CSphMatch & tMatch ) const final { return (int64_t)IntEval ( tMatch ); }
	ISphExpr * Clone() const final { return new Expr_DateAdd_c ( *this ); }

private:
	TimeUnit_e m_eUnit = TimeUnit_e::SECOND;	// interval unit
	bool m_bAdd = true;							// true: DATE_ADD, false: DATE_SUB
	bool m_bConst = true;						// interval argument is constant
	int m_iAdd = 0;								// precomputed interval (valid when m_bConst)
};


Expr_DateAdd_c::Expr_DateAdd_c ( ISphExpr * pFirst, ISphExpr * pSecond, TimeUnit_e eUnit, bool bAdd )
	: Expr_Binary_c ( "Expr_DateAdd_c", pFirst, pSecond )
	, m_eUnit ( eUnit )
	, m_bAdd ( bAdd )
{
	m_bConst = m_pSecond->IsConst();
	if ( m_bConst )
	{
		CSphMatch tMatch;
		m_iAdd = m_pSecond->IntEval(tMatch);
	}
}


int Expr_DateAdd_c::IntEval ( const CSphMatch & tMatch ) const
{
	cctz::civil_second tCS = ConvertTime ( m_pFirst->IntEval(tMatch) );
	int iAdd = m_bConst ? m_iAdd : m_pSecond->IntEval(tMatch);
	iAdd *= m_bAdd ? 1 : -1;

	// NOTE: cctz civil_second normalizes out-of-range fields, so e.g. second()+90
	// correctly carries into minutes/hours/days
	switch ( m_eUnit )
	{
	case TimeUnit_e::SECOND:
		return ConvertTime ( cctz::civil_second ( tCS.year(), tCS.month(), tCS.day(), tCS.hour(), tCS.minute(), tCS.second() + iAdd ) );

	case TimeUnit_e::MINUTE:
		return ConvertTime ( cctz::civil_second ( tCS.year(), tCS.month(), tCS.day(), tCS.hour(), tCS.minute() + iAdd, tCS.second() ) );

	case TimeUnit_e::HOUR:
		return ConvertTime ( cctz::civil_second ( tCS.year(), tCS.month(), tCS.day(), tCS.hour() + iAdd, tCS.minute(), tCS.second() ) );

	case TimeUnit_e::DAY:
	case TimeUnit_e::WEEK:
	{
		iAdd *= m_eUnit==TimeUnit_e::WEEK ? 7 : 1;
		return ConvertTime ( cctz::civil_second ( tCS.year(), tCS.month(), tCS.day() + iAdd, tCS.hour(), tCS.minute(), tCS.second() ) );
	}

	case TimeUnit_e::MONTH:
	case TimeUnit_e::QUARTER:
	{
		// clamp the day-of-month to the length of the target month: if normalization
		// spills past the target month (e.g. Jan 31 + 1 month), take the last second
		// of the target month instead
		iAdd *= m_eUnit==TimeUnit_e::QUARTER ? 3 : 1;
		const auto tNextMonth = cctz::civil_month(tCS) + iAdd;
		const auto tLastDayOfNextMonth = cctz::civil_second ( cctz::civil_day(tNextMonth + 1) - 1 );
		const auto tNormalized = cctz::civil_second ( tCS.year(), tCS.month() + iAdd, tCS.day(), tCS.hour(), tCS.minute(), tCS.second() );
		return ConvertTime ( std::min ( tNormalized, tLastDayOfNextMonth ) );
	}

	case TimeUnit_e::YEAR:
		return ConvertTime ( cctz::civil_second ( tCS.year() + iAdd, tCS.month(), tCS.day(), tCS.hour(), tCS.minute(), tCS.second() ) );

	default:
		assert ( 0 && "Unknown time unit" );
		return 0;
	}
}
// Toggle the process-wide "group by time in UTC" mode (grouping_in_utc setting);
// consulted by the CreateExpr* factories below when choosing template variants.
void SetGroupingInUTC ( bool bGroupingInUtc )
{
	g_bUseUTC = bGroupingInUtc;
}


// Current value of the process-wide UTC grouping mode.
bool GetGroupingInUTC()
{
	return g_bUseUTC;
}
// NOW() factory: iNow is the query-setup timestamp snapshot.
ISphExpr * CreateExprNow ( int iNow )
{
	return new Expr_Now_c(iNow);
}


// DATE_FORMAT(arg, fmt) factory; fmt is evaluated once at construction.
ISphExpr * CreateExprDateFormat ( ISphExpr * pArg, ISphExpr * pFmt )
{
	return new ExprDateFormat_c ( pArg, pFmt );
}
// Current-time expression factory. bUTC selects UTC vs local-time conversion,
// bDate selects the full "YYYY-MM-DD hh:mm:ss" string form vs time-only; the pair
// maps onto the four Expr_CurTime_T template instantiations.
ISphExpr * CreateExprCurTime ( bool bUTC, bool bDate )
{
	int iIndex = 2*(bUTC ? 1 : 0) + (bDate ? 1 : 0);
	switch ( iIndex )
	{
	case 0: return new Expr_CurTime_T<false,false>;
	case 1: return new Expr_CurTime_T<false,true>;
	case 2: return new Expr_CurTime_T<true,false>;
	case 3: return new Expr_CurTime_T<true,true>;
	default:
		// fixed: assert message used to say "CreateExprTime error", naming the wrong function
		assert ( 0 && "CreateExprCurTime error" );
		return nullptr;
	}
}
// Factories exported to the expression parser. The date-part factories below
// consult the process-wide g_bUseUTC flag and instantiate either the UTC or the
// local-time variant of the corresponding template.

ISphExpr * CreateExprCurDate()
{
	return new Expr_CurDate_c;
}


ISphExpr * CreateExprTime ( ISphExpr * pArg )
{
	return new Expr_Time_c(pArg);
}


ISphExpr * CreateExprDate ( ISphExpr * pArg )
{
	return new Expr_Date_c(pArg);
}


ISphExpr * CreateExprDayName ( ISphExpr * pArg )
{
	return new Expr_DayName_c(pArg);
}


ISphExpr * CreateExprMonthName ( ISphExpr * pArg )
{
	return new Expr_MonthName_c(pArg);
}


ISphExpr * CreateExprTimeDiff ( ISphExpr * pFirst, ISphExpr * pSecond )
{
	return new Expr_TimeDiff_c ( pFirst, pSecond );
}


ISphExpr * CreateExprDateDiff ( ISphExpr * pFirst, ISphExpr * pSecond )
{
	return new Expr_DateDiff_c ( pFirst, pSecond );
}


ISphExpr * CreateExprDateAdd ( ISphExpr * pFirst, ISphExpr * pSecond, TimeUnit_e eUnit, bool bAdd )
{
	return new Expr_DateAdd_c ( pFirst, pSecond, eUnit, bAdd );
}


// g_bUseUTC-dependent factories
ISphExpr * CreateExprDay ( ISphExpr * pExpr )
{
	return g_bUseUTC ? (ISphExpr *)new Expr_Day_T<true>(pExpr) : (ISphExpr *)new Expr_Day_T<false>(pExpr);
}


ISphExpr * CreateExprWeek ( ISphExpr * pFirst, ISphExpr * pSecond )
{
	return g_bUseUTC ? (ISphExpr *)new Expr_Week_T<true>( pFirst, pSecond ) : (ISphExpr *)new Expr_Week_T<false>( pFirst, pSecond );
}


ISphExpr * CreateExprMonth ( ISphExpr * pExpr )
{
	return g_bUseUTC ? (ISphExpr *)new Expr_Month_T<true>(pExpr) : (ISphExpr *)new Expr_Month_T<false>(pExpr);
}


ISphExpr * CreateExprYear ( ISphExpr * pExpr )
{
	return g_bUseUTC ? (ISphExpr *)new Expr_Year_T<true>(pExpr) : (ISphExpr *)new Expr_Year_T<false>(pExpr);
}


ISphExpr * CreateExprYearMonth ( ISphExpr * pExpr )
{
	return g_bUseUTC ? (ISphExpr *)new Expr_YearMonth_T<true>(pExpr) : (ISphExpr *)new Expr_YearMonth_T<false>(pExpr);
}


ISphExpr * CreateExprYearMonthDay ( ISphExpr * pExpr )
{
	return g_bUseUTC ? (ISphExpr *)new Expr_YearMonthDay_T<true>(pExpr) : (ISphExpr *)new Expr_YearMonthDay_T<false>(pExpr);
}


ISphExpr * CreateExprYearWeek ( ISphExpr * pExpr )
{
	return g_bUseUTC ? (ISphExpr *)new Expr_YearWeek_T<true>(pExpr) : (ISphExpr *)new Expr_YearWeek_T<false>(pExpr);
}


ISphExpr * CreateExprHour ( ISphExpr * pExpr )
{
	return g_bUseUTC ? (ISphExpr *)new Expr_Hour_T<true>(pExpr) : (ISphExpr *)new Expr_Hour_T<false>(pExpr);
}


ISphExpr * CreateExprMinute ( ISphExpr * pExpr )
{
	return g_bUseUTC ? (ISphExpr *)new Expr_Minute_T<true>(pExpr) : (ISphExpr *)new Expr_Minute_T<false>(pExpr);
}


ISphExpr * CreateExprSecond ( ISphExpr * pExpr )
{
	return g_bUseUTC ? (ISphExpr *)new Expr_Second_T<true>(pExpr) : (ISphExpr *)new Expr_Second_T<false>(pExpr);
}


ISphExpr * CreateExprDayOfWeek ( ISphExpr * pExpr )
{
	return g_bUseUTC ? (ISphExpr *)new Expr_DayOfWeek_T<true>(pExpr) : (ISphExpr *)new Expr_DayOfWeek_T<false>(pExpr);
}


ISphExpr * CreateExprDayOfYear ( ISphExpr * pExpr )
{
	return g_bUseUTC ? (ISphExpr *)new Expr_DayOfYear_T<true>(pExpr) : (ISphExpr *)new Expr_DayOfYear_T<false>(pExpr);
}


ISphExpr * CreateExprQuarter ( ISphExpr * pExpr )
{
	return g_bUseUTC ? (ISphExpr *)new Expr_Quarter_T<true>(pExpr) : (ISphExpr *)new Expr_Quarter_T<false>(pExpr);
}
| 23,463
|
C++
|
.cpp
| 618
| 35.804207
| 175
| 0.695102
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,930
|
sphinxpq.cpp
|
manticoresoftware_manticoresearch/src/sphinxpq.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "sphinxpq.h"
#include "sphinxsort.h"
#include "fileutils.h"
#include "icu.h"
#include "accumulator.h"
#include "indexsettings.h"
#include "coroutine.h"
#include "mini_timer.h"
#include "binlog.h"
#include "indexfiles.h"
#include "tokenizer/tokenizer.h"
#include "task_dispatcher.h"
#include "stackmock.h"
#include <atomic>
using namespace Threads;
/// protection from concurrent changes during binlog replay
#ifndef NDEBUG
static auto &g_bRTChangesAllowed = RTChangesAllowed ();
#endif
//////////////////////////////////////////////////////////////////////////
// percolate index
// A compiled percolate (stored) query: parsed query tree plus precomputed
// early-rejection data (term hashes, infix blooms, suffixes) and tags.
struct StoredQuery_t : public StoredQuery_i, public ISphRefcountedMT
{
	CSphFixedVector<uint64_t> m_dRejectTerms { 0 };		// sorted unique FNV64 hashes of required terms
	CSphFixedVector<uint64_t> m_dRejectWilds { 0 };		// bloom filters for wildcard terms
	CSphFixedVector<uint64_t> m_dTags { 0 };
	CSphVector<CSphString> m_dSuffixes;					// longest wildcard-free substrings of star-terms
	DictMap_t m_hDict;
	std::unique_ptr<XQQuery_t> m_pXQ;					// parsed query tree
	int m_iStackRequired = 0;
	static int m_iStackBaseRequired; // additional stack which was in use at the moment of measuring

	bool m_bOnlyTerms = false; // flag of simple query, ie only words and no operators
	bool IsFullscan() const { return m_pXQ->m_bEmpty; }
};
// base stack budget for stored-query processing; debug builds use deeper frames
#ifdef NDEBUG
static constexpr int PQ_BASE_STACK = 24 * 1024;
#else
static constexpr int PQ_BASE_STACK = 40 * 1024;
#endif

int StoredQuery_t::m_iStackBaseRequired = PQ_BASE_STACK;

using StoredQuerySharedPtr_t = SharedPtr_t<StoredQuery_t>;
using StoredQuerySharedPtrVecSharedPtr_t = SharedPtr_t<CSphVector<StoredQuerySharedPtr_t>>;
// Read-only snapshot of the stored-query vector: a VecTraits view plus a shared
// pointer keeping the backing vector alive, and the generation counter the
// snapshot was taken at (used to detect concurrent modification).
class SharedPQSlice_t : public VecTraits_T<const StoredQuerySharedPtr_t>
{
	using BASE_t = VecTraits_T<const StoredQuerySharedPtr_t>;
	StoredQuerySharedPtrVecSharedPtr_t m_pBackend;	// keeps the viewed vector alive
	int64_t m_iGeneration {0};

public:
	explicit SharedPQSlice_t ( StoredQuerySharedPtrVecSharedPtr_t pBackend, int64_t iGeneration=0 )
		: BASE_t { *pBackend }
		, m_pBackend { std::move(pBackend) }
		, m_iGeneration ( iGeneration )
	{}

	SharedPQSlice_t () = default;
	SharedPQSlice_t ( SharedPQSlice_t&& rhs ) = default;
	SharedPQSlice_t& operator= ( SharedPQSlice_t&& rhs ) = default;
	int64_t Generation() const { return m_iGeneration; };
};
static FileAccessSettings_t g_tDummyFASettings;

// Percolate ("reverse search") index implementation: stores compiled queries and
// matches incoming documents against them. Stored queries live in a shared vector
// guarded by m_tLock; readers take SharedPQSlice_t snapshots.
class PercolateIndex_c final : public PercolateIndex_i
{
public:
	PercolateIndex_c ( CSphString sIndexName, CSphString sPath, CSphSchema tSchema );
	~PercolateIndex_c () override;

	bool AddDocument ( InsertDocData_c & tDoc, bool bReplace, const CSphString & sTokenFilterOptions, CSphString & sError, CSphString & sWarning, RtAccum_t * pAccExt ) override;
	bool MatchDocuments ( RtAccum_t * pAccExt, PercolateMatchResult_t &tRes ) override;
	bool Commit ( int * pDeleted, RtAccum_t * pAccExt, CSphString* pError = nullptr ) override;
	void RollBack ( RtAccum_t * pAccExt ) override;
	std::unique_ptr<StoredQuery_i> CreateQuery ( PercolateQueryArgs_t & tArgs, CSphString & sError ) final EXCLUDES ( m_tLock );
	bool Prealloc ( bool bStripPath, FilenameBuilder_i * pFilenameBuilder, StrVec_t & dWarnings ) override;
	void PostSetup() override EXCLUDES ( m_tLock );
	bool BindAccum ( RtAccum_t * pAccExt, CSphString * pError = nullptr ) override;
	TokenizerRefPtr_c CloneIndexingTokenizer() const override { return m_pTokenizerIndexing->Clone ( SPH_CLONE_INDEX ); }
	void SaveMeta ( bool bShutdown = false ) EXCLUDES ( m_tLock );
	void SaveMeta ( const SharedPQSlice_t& dStored, bool bShutdown = false );
	enum class LOAD_E { ParseError_e, GeneralError_e, Ok_e };
	LOAD_E LoadMetaJson ( const CSphString& sMeta, bool bStripPath, FilenameBuilder_i* pFilenameBuilder, StrVec_t& dWarnings );
	LOAD_E LoadMetaLegacy ( const CSphString& sMeta, bool bStripPath, FilenameBuilder_i* pFilenameBuilder, StrVec_t& dWarnings );
	bool LoadMeta ( const CSphString& sMeta, bool bStripPath, FilenameBuilder_i* pFilenameBuilder, StrVec_t& dWarnings );
	bool Truncate ( CSphString &, Truncate_e eAction ) override EXCLUDES ( m_tLock );

	// RT index stub
	bool MultiQuery ( CSphQueryResult &, const CSphQuery &, const VecTraits_T<ISphMatchSorter *> &, const CSphMultiQueryArgs & ) const override;
	bool DeleteDocument ( const VecTraits_T<DocID_t> &, CSphString & , RtAccum_t * pAccExt ) override { RollBack ( pAccExt ); return true; }
	void ForceRamFlush ( const char* szReason ) EXCLUDES ( m_tLock ) final;
	bool IsFlushNeed() const override;
	bool ForceDiskChunk () override;
	bool IsSameSettings ( CSphReconfigureSettings & tSettings, CSphReconfigureSetup & tSetup, StrVec_t & dWarnings, CSphString & sError ) const override;
	bool Reconfigure ( CSphReconfigureSetup & tSetup ) override EXCLUDES ( m_tLock );
	void ProcessDiskChunk ( int, VisitChunk_fn&& ) const final {};
	int64_t GetLastFlushTimestamp() const override { return m_tmSaved; }

	// plain index stub
	bool EarlyReject ( CSphQueryContext * pCtx, CSphMatch & tMatch ) const override;
	const CSphSourceStats & GetStats () const override { return m_tStat; }
	void GetStatus ( CSphIndexStatus* pRes ) const final;
	void IndexDeleted() override { m_bIndexDeleted = true; }
	void ProhibitSave() final;
	void EnableSave() final;
	void LockFileState ( CSphVector<CSphString> & dFiles ) final;
	const CSphSchema &GetMatchSchema () const override { return m_tMatchSchema; }
	virtual uint64_t GetSchemaHash () const final { return 0; }
	int64_t GetMemLimit() const final { return 0; }
	Binlog::CheckTnxResult_t ReplayTxn ( CSphReader & tReader, CSphString & sError, BYTE uOp, Binlog::CheckTxn_fn && fnCanContinue ) final; // cb from binlog

private:
	static const DWORD META_HEADER_MAGIC = 0x50535451;	///< magic 'PSTQ' header

	// NOTICE! meta version 10 was introduced in 2a6ea8f7 and rolled back to 9 in e1709760.
	// if you need to upgrade - skip v10 and use v11.
	static constexpr DWORD META_VERSION = 9;			// next should be 11

	int m_iLockFD = -1;
	CSphSourceStats m_tStat;
	TokenizerRefPtr_c m_pTokenizerIndexing;
	int m_iMaxCodepointLength = 0;
	int64_t m_iSavedTID = 0;
	int64_t m_tmSaved = 0;
	int m_iDisabledCounter = 0;
	bool m_bHasFiles = false;
	bool m_bIndexDeleted = false;

	StoredQuerySharedPtrVecSharedPtr_t m_pQueries GUARDED_BY ( m_tLock );
	OpenHashTable_T<int64_t, int> m_hQueries GUARDED_BY ( m_tLock ); // QUID -> query
	int64_t m_iGeneration GUARDED_BY ( m_tLock ) { 0 }; // eliminate ABA race on insert/delete
	mutable RwLock_t m_tLock;

	CSphFixedVector<StoredQueryDesc_t> m_dLoadedQueries { 0 }; // temporary, just descriptions
	CSphSchema m_tMatchSchema;
	CSphVector<SphWordID_t> m_dHitlessWords;

	void DoMatchDocuments ( const RtSegment_t * pSeg, PercolateMatchResult_t & tRes );
	bool MultiScan ( CSphQueryResult & tResult, const CSphQuery & tQuery, const VecTraits_T<ISphMatchSorter*>& dSorters,
		const CSphMultiQueryArgs &tArgs ) const;

	bool CanBeAdded ( PercolateQueryArgs_t& tArgs, CSphString& sError ) const REQUIRES_SHARED ( m_tLock );
	std::unique_ptr<StoredQuery_i> CreateQuery ( PercolateQueryArgs_t& tArgs, const TokenizerRefPtr_c& pTokenizer, const DictRefPtr_c& pDict, CSphString& sError );
	static void CalcNecessaryStack ( StoredQuery_t* pStored, CSphString& sError );

public:
	PercolateMatchContext_t * CreateMatchContext ( const RtSegment_t * pSeg, const SegmentReject_t &tReject );
	int GetNumOfLocks () const noexcept final;

private:
	int ReplayInsertAndDeleteQueries ( const VecTraits_T<StoredQuery_i*>& dNewQueries, const VecTraits_T<int64_t>& dDeleteQueries, const VecTraits_T<uint64_t>& dDeleteTags ) EXCLUDES ( m_tLock );
	void GetIndexFiles ( StrVec_t& dFiles, StrVec_t& dExtra, const FilenameBuilder_i* = nullptr ) const override;
	Bson_t ExplainQuery ( const CSphString & sQuery ) const final;

	StoredQuerySharedPtrVecSharedPtr_t MakeClone () const REQUIRES_SHARED ( m_tLock );
	void AddToStoredUnl ( StoredQuerySharedPtr_t tNew ) REQUIRES ( m_tLock );
	void PostSetupUnl () REQUIRES ( m_tLock );
	SharedPQSlice_t GetStored () const EXCLUDES ( m_tLock );
	SharedPQSlice_t GetStoredUnl () const REQUIRES_SHARED ( m_tLock );
	bool IsSaveDisabled() const noexcept;
	bool NeedStoreWordID () const override { return ( m_tSettings.m_eHitless==SPH_HITLESS_SOME && m_dHitlessWords.GetLength() ); }

	bool LoadMetaImpl ( const CSphString& sMeta, bool bStripPath, FilenameBuilder_i* pFilenameBuilder, StrVec_t& dWarnings );
};
// FIXME! Can't define it in the class because it fails to link on clang-15
//////////////////////////////////////////////////////////////////////////
// percolate functions
// bloom filter geometry: two filters (NGRAM_0 / NGRAM_1) of 32 uint64 words each
#define PERCOLATE_BLOOM_WILD_COUNT 32
#define PERCOLATE_BLOOM_SIZE PERCOLATE_BLOOM_WILD_COUNT * 2
#define PERCOLATE_WORDS_PER_CP 128

/// percolate query index factory
std::unique_ptr<PercolateIndex_i> CreateIndexPercolate ( CSphString sIndexName, CSphString sPath, CSphSchema tSchema )
{
	MEMORY ( MEM_INDEX_RT );
	return std::make_unique<PercolateIndex_c> ( std::move ( sIndexName ), std::move ( sPath ), std::move ( tSchema ) );
}
// Build early-rejection data for a document segment: sorted unique FNV64 hashes
// of all its terms, optional infix bloom filters (when bBuildInfix), and per-row
// variants of both when the segment holds more than one document.
static SegmentReject_t SegmentGetRejects ( const RtSegment_t * pSeg, bool bBuildInfix, bool bUtf8, ESphHitless eHitless )
{
	SegmentReject_t tReject;
	tReject.m_iRows = pSeg->m_uRows;
	const bool bMultiDocs = ( pSeg->m_uRows>1 );

	// per-document arrays are only needed when the segment has multiple rows
	if ( bMultiDocs )
	{
		tReject.m_dPerDocTerms.Reset ( pSeg->m_uRows );
		if ( bBuildInfix )
		{
			tReject.m_dPerDocWilds.Reset ( pSeg->m_uRows * PERCOLATE_BLOOM_SIZE );
			tReject.m_dPerDocWilds.Fill ( 0 );
		}
	}

	if ( bBuildInfix )
	{
		tReject.m_dWilds.Reset ( PERCOLATE_BLOOM_SIZE );
		tReject.m_dWilds.Fill ( 0 );
	}

	RtWordReader_c tDict ( pSeg, true, PERCOLATE_WORDS_PER_CP, eHitless );

	// segment-wide bloom halves: NGRAM_0 in the first 32 words, NGRAM_1 in the second
	BloomGenTraits_t tBloom0 ( tReject.m_dWilds.Begin() );
	BloomGenTraits_t tBloom1 ( tReject.m_dWilds.Begin() + PERCOLATE_BLOOM_WILD_COUNT );

	while ( tDict.UnzipWord() )
	{
		const auto* pWord = (const RtWord_t*)tDict;

		// wordlist stores length-prefixed keywords
		const BYTE * pDictWord = pWord->m_sWord + 1;
		int iLen = pWord->m_sWord[0];

		uint64_t uHash = sphFNV64 ( pDictWord, iLen );
		tReject.m_dTerms.Add ( uHash );

		if ( bBuildInfix )
		{
			BuildBloom ( pDictWord, iLen, BLOOM_NGRAM_0, bUtf8, PERCOLATE_BLOOM_WILD_COUNT, tBloom0 );
			BuildBloom ( pDictWord, iLen, BLOOM_NGRAM_1, bUtf8, PERCOLATE_BLOOM_WILD_COUNT, tBloom1 );
		}

		if ( bMultiDocs )
		{
			// replay the word's doclist to attribute it to individual rows
			RtDocReader_c tDoc ( pSeg, *pWord );
			while ( tDoc.UnzipDoc() )
			{
				assert ( tDoc->m_tRowID<pSeg->m_uRows );
				tReject.m_dPerDocTerms[tDoc->m_tRowID].Add ( uHash );

				if ( bBuildInfix )
				{
					uint64_t * pBloom = tReject.m_dPerDocWilds.Begin() + tDoc->m_tRowID * PERCOLATE_BLOOM_SIZE;
					BloomGenTraits_t tBloom2Doc0 ( pBloom );
					BloomGenTraits_t tBloom2Doc1 ( pBloom + PERCOLATE_BLOOM_WILD_COUNT );
					BuildBloom ( pDictWord, iLen, BLOOM_NGRAM_0, bUtf8, PERCOLATE_BLOOM_WILD_COUNT, tBloom2Doc0 );
					BuildBloom ( pDictWord, iLen, BLOOM_NGRAM_1, bUtf8, PERCOLATE_BLOOM_WILD_COUNT, tBloom2Doc1 );
				}
			}
		}
	}

	// sort + dedupe hash lists so lookups can binary-search
	tReject.m_dTerms.Uniq();
	if ( bMultiDocs )
	{
		for ( auto & dTerms : tReject.m_dPerDocTerms )
			dTerms.Uniq();
	}

	return tReject;
}
// Recursively collect early-rejection data from the REQUIRED parts of a query
// tree: FNV64 hashes of plain terms into dRejectTerms, and bloom filters plus
// suffixes for wildcard terms into dRejectBloom/dSuffixes. NOT branches are
// skipped (their terms are not required to be present); bOnlyTerms is cleared
// as soon as any operator other than AND/AND-NOT is seen.
static void DoQueryGetRejects ( const XQNode_t * pNode, const DictRefPtr_c& pDict, CSphVector<uint64_t> & dRejectTerms, CSphFixedVector<uint64_t> & dRejectBloom, CSphVector<CSphString> & dSuffixes, bool & bOnlyTerms, bool bUtf8 )
{
	// FIXME!!! replace recursion to prevent stack overflow for large and complex queries
	if ( pNode && !( pNode->GetOp()==SPH_QUERY_AND || pNode->GetOp()==SPH_QUERY_ANDNOT ) )
		bOnlyTerms = false;

	if ( !pNode || pNode->GetOp()==SPH_QUERY_NOT )
		return;

	BYTE sTmp[3 * SPH_MAX_WORD_LEN + 16];
	ARRAY_FOREACH ( i, pNode->m_dWords )
	{
		const XQKeyword_t & tWord = pNode->m_dWords[i];
		int iLen = tWord.m_sWord.Length();
		assert ( iLen < (int)sizeof( sTmp ) );

		if ( !iLen )
			continue;

		// copy the word into sTmp, tracking the longest run without wildcards
		// (sInfix/iInfixLen) for bloom/suffix generation
		bool bStarTerm = false;
		int iCur = 0;
		int iInfixLen = 0;
		const char * sInfix = nullptr;
		const char * s = tWord.m_sWord.cstr();
		BYTE * sDst = sTmp;

		while ( *s )
		{
			if ( sphIsWild ( *s ) )
			{
				iCur = 0;
				bStarTerm = true;
			} else if ( ++iCur>iInfixLen )
			{
				sInfix = s - iCur + 1;
				iInfixLen = iCur;
			}

			*sDst++ = *s++;
		}
		sTmp[iLen] = '\0';

		// term goes to bloom
		if ( bStarTerm )
		{
			// initialize bloom filter array
			if ( !dRejectBloom.GetLength() )
			{
				dRejectBloom.Reset ( PERCOLATE_BLOOM_SIZE );
				dRejectBloom.Fill ( 0 );
			}

			// only the longest wildcard-free substring participates
			BloomGenTraits_t tBloom0 ( dRejectBloom.Begin() );
			BloomGenTraits_t tBloom1 ( dRejectBloom.Begin() + PERCOLATE_BLOOM_WILD_COUNT );
			BuildBloom ( (const BYTE *)sInfix, iInfixLen, BLOOM_NGRAM_0, bUtf8, PERCOLATE_BLOOM_WILD_COUNT, tBloom0 );
			BuildBloom ( (const BYTE *)sInfix, iInfixLen, BLOOM_NGRAM_1, bUtf8, PERCOLATE_BLOOM_WILD_COUNT, tBloom1 );
			dSuffixes.Add().SetBinary ( sInfix, iInfixLen );

			continue;
		}

		// drop words the dictionary does not accept (e.g. stopwords)
		SphWordID_t uWord = 0;
		if ( tWord.m_bMorphed )
			uWord = pDict->GetWordIDNonStemmed ( sTmp );
		else
			uWord = pDict->GetWordID ( sTmp );
		if ( !uWord )
			continue;

		// term goes to regular array
		dRejectTerms.Add ( sphFNV64 ( sTmp ) );
	}

	// composite nodes children recursion
	// for AND-NOT node NOT children should be skipped
	int iCount = pNode->m_dChildren.GetLength();
	if ( pNode->GetOp()==SPH_QUERY_ANDNOT && iCount>1 )
		iCount = 1;

	for ( int i=0; i<iCount; i++ )
		DoQueryGetRejects ( pNode->m_dChildren[i], pDict, dRejectTerms, dRejectBloom, dSuffixes, bOnlyTerms, bUtf8 );
}
static void QueryGetRejects ( const XQNode_t * pNode, const DictRefPtr_c& pDict, CSphFixedVector<uint64_t> & dRejectTerms, CSphFixedVector<uint64_t> & dRejectBloom, CSphVector<CSphString> & dSuffixes, bool & bOnlyTerms, bool bUtf8 )
{
CSphVector<uint64_t> dTmpTerms;
DoQueryGetRejects ( pNode, pDict, dTmpTerms, dRejectBloom, dSuffixes, bOnlyTerms, bUtf8 );
dTmpTerms.Uniq();
dRejectTerms.CopyFrom ( dTmpTerms );
}
// Recursively collect the per-query term dictionary: for every keyword in the
// tree, store (raw-word FNV hash) -> (word id, offset/length of the
// normalized keyword bytes appended to dKeywords). This map later lets a
// stored query run against a segment without the full index dictionary.
static void DoQueryGetTerms ( const XQNode_t * pNode, const DictRefPtr_c& pDict, DictMap_t & hDict, CSphVector<BYTE> & dKeywords )
{
	if ( !pNode )
		return;
	BYTE sTmp[3 * SPH_MAX_WORD_LEN + 16];
	ARRAY_FOREACH ( i, pNode->m_dWords )
	{
		const XQKeyword_t & tWord = pNode->m_dWords[i];
		// the lookup key is the hash of the raw, pre-morphology keyword
		uint64_t uHash = sphFNV64 ( tWord.m_sWord.cstr() );
		if ( hDict.m_hTerms.Find ( uHash ) )
			continue; // already collected from another node
		int iLen = tWord.m_sWord.Length();
		assert ( iLen < (int)sizeof( sTmp ) );
		if ( !iLen )
			continue;
		memcpy ( (char *)sTmp, tWord.m_sWord.cstr(), iLen );
		sTmp[iLen] = '\0';
		// zero word id (e.g. stopword) — nothing to store
		SphWordID_t uWord = 0;
		if ( tWord.m_bMorphed )
			uWord = pDict->GetWordIDNonStemmed ( sTmp );
		else
			uWord = pDict->GetWordID ( sTmp );
		if ( !uWord )
			continue;
		// GetWordID may have rewritten sTmp in place; re-measure its length
		iLen = (int) strnlen ( (const char *)sTmp, sizeof(sTmp) );
		DictTerm_t & tTerm = hDict.m_hTerms.Acquire ( uHash );
		tTerm.m_uWordID = uWord;
		tTerm.m_iWordOff = dKeywords.GetLength();
		tTerm.m_iWordLen = iLen;
		dKeywords.Append ( sTmp, iLen );
	}
	for ( const XQNode_t * pChild : pNode->m_dChildren )
		DoQueryGetTerms ( pChild, pDict, hDict, dKeywords );
}
static void QueryGetTerms ( const XQNode_t * pNode, const DictRefPtr_c& pDict, DictMap_t & hDict )
{
CSphVector<BYTE> dKeywords;
DoQueryGetTerms ( pNode, pDict, hDict, dKeywords );
hDict.m_dKeywords.CopyFrom ( dKeywords );
}
// Checks whether every query term hash is present among the document term
// hashes. Both inputs must be sorted ascending; the search window shrinks as
// terms are matched, so each lookup starts past the previous hit.
// Returns true when ALL query terms were found (i.e. the query is NOT rejected).
static bool TermsReject ( const VecTraits_T<uint64_t> & dDocs, const VecTraits_T<uint64_t> & dQueries )
{
	if ( dDocs.IsEmpty() || dQueries.IsEmpty() )
		return false;
	const uint64_t * pCurQuery = dQueries.Begin();
	const uint64_t * pQueriesEnd = pCurQuery + dQueries.GetLength();
	const uint64_t * pCurDoc = dDocs.Begin();
	const uint64_t * pLastDoc = dDocs.Begin() + dDocs.GetLength() - 1;
	while ( pCurQuery<pQueriesEnd && pCurDoc<=pLastDoc )
	{
		pCurDoc = sphBinarySearch ( pCurDoc, pLastDoc, *pCurQuery );
		if ( !pCurDoc )
			return false; // a query term is missing from the document
		++pCurDoc;
		++pCurQuery;
	}
	return ( pCurQuery==pQueriesEnd );
}
static bool WildsReject ( const uint64_t * pFilter, const CSphFixedVector<uint64_t> & dQueries )
{
if ( !dQueries.GetLength() )
return false;
const uint64_t * pQTerm = dQueries.Begin();
const uint64_t * pQEnd = dQueries.Begin() + dQueries.GetLength();
for ( ; pQTerm<pQEnd; pQTerm++, pFilter++ )
{
// check bloom passes
if ( *pQTerm && ( (*pQTerm & *pFilter)!=*pQTerm ) )
return false;
}
return true;
}
// Early-rejection check: returns true when the stored query can be safely
// skipped for this segment (none of its mandatory terms/wildcards can match).
// Uses segment-wide term/bloom data first, then falls back to the per-document
// data (if present) before finally giving up on rejection.
bool SegmentReject_t::Filter ( const StoredQuery_t * pStored, bool bUtf8 ) const
{
	// no early reject for complex queries
	if ( !pStored->m_bOnlyTerms )
		return false;
	// empty query rejects
	if ( !pStored->m_dRejectTerms.GetLength() && !pStored->m_dRejectWilds.GetLength() )
		return true;
	// bTermsRejected==true means the plain terms alone rule out a match
	bool bTermsRejected = ( pStored->m_dRejectTerms.GetLength()==0 );
	if ( pStored->m_dRejectTerms.GetLength() )
		bTermsRejected = !TermsReject ( m_dTerms, pStored->m_dRejectTerms );
	if ( bTermsRejected && ( !m_dWilds.GetLength() || !pStored->m_dRejectWilds.GetLength() ) )
		return true;
	// bWildRejected==true means the wildcard blooms alone rule out a match
	bool bWildRejected = ( m_dWilds.GetLength()==0 || pStored->m_dRejectWilds.GetLength()==0 );
	if ( m_dWilds.GetLength() && pStored->m_dRejectWilds.GetLength() )
		bWildRejected = !WildsReject ( m_dWilds.Begin(), pStored->m_dRejectWilds );
	if ( bTermsRejected && bWildRejected )
		return true;
	// segment-wide data could not reject; retry terms per document
	if ( !bTermsRejected && pStored->m_dRejectTerms.GetLength() && m_dPerDocTerms.GetLength() )
	{
		// in case no document matched - early reject triggers
		int iRejects = 0;
		ARRAY_FOREACH ( i, m_dPerDocTerms )
		{
			// a single document containing all terms keeps the query alive
			if ( TermsReject ( m_dPerDocTerms[i], pStored->m_dRejectTerms ) )
				break;
			iRejects++;
		}
		bTermsRejected = ( iRejects==m_dPerDocTerms.GetLength() );
	}
	// retry wildcards per document, rebuilding the query bloom against each row
	if ( bTermsRejected && !bWildRejected && pStored->m_dRejectWilds.GetLength() && m_dPerDocWilds.GetLength() )
	{
		// in case no document matched - early reject triggers
		int iRowsPassed = 0;
		for ( int i=0; i<m_iRows && iRowsPassed==0; i++ )
		{
			BloomCheckTraits_t tBloom0 ( m_dPerDocWilds.Begin() + i * PERCOLATE_BLOOM_SIZE );
			BloomCheckTraits_t tBloom1 ( m_dPerDocWilds.Begin() + i * PERCOLATE_BLOOM_SIZE + PERCOLATE_BLOOM_WILD_COUNT );
			int iWordsPassed = 0;
			ARRAY_FOREACH ( iWord, pStored->m_dSuffixes )
			{
				// every wildcard fragment must pass both n-gram blooms of the row
				const CSphString & sSuffix = pStored->m_dSuffixes[iWord];
				int iLen = sSuffix.Length();
				BuildBloom ( (const BYTE *)sSuffix.cstr(), iLen, BLOOM_NGRAM_0, bUtf8, PERCOLATE_BLOOM_WILD_COUNT, tBloom0 );
				if ( !tBloom0.IterateNext() )
					break;
				BuildBloom ( (const BYTE *)sSuffix.cstr(), iLen, BLOOM_NGRAM_1, bUtf8, PERCOLATE_BLOOM_WILD_COUNT, tBloom1 );
				if ( !tBloom1.IterateNext() )
					break;
				iWordsPassed++;
			}
			if ( iWordsPassed!=pStored->m_dSuffixes.GetLength() )
				continue;
			iRowsPassed++;
		}
		bWildRejected = ( iRowsPassed==0 );
	}
	return ( bTermsRejected && bWildRejected );
}
// FIXME!!! move to common RT code instead copy-paste it
// Describes the searchable fragment extracted from a wildcard keyword.
struct SubstringInfo_t
{
	char m_sMorph[SPH_MAX_KEYWORD_LEN];		// scratch buffer for the magic-prefixed non-stemmed form
	const char * m_sSubstring = nullptr;	// longest wildcard-free fragment, used for dictionary lookup
	const char * m_sWildcard = nullptr;		// full wildcard pattern for final candidate matching
	int m_iSubLen = 0;						// length of m_sSubstring
};
// Narrow the segment dictionary to the [offset, end) byte range that can
// contain the exact word, using the word checkpoints. With no checkpoints the
// whole dictionary (0 .. word-data length) is returned.
static Slice_t GetTermLocator ( const char * sWord, int iLen, const RtSegment_t * pSeg )
{
	Slice_t tChPoint;
	tChPoint.m_uLen = pSeg->m_dWords.GetLength();
	// tighten dictionary location
	if ( pSeg->m_dWordCheckpoints.GetLength() )
	{
		const RtWordCheckpoint_t* pCheckpoint = sphSearchCheckpointWrd ( sWord, iLen, false, pSeg->m_dWordCheckpoints );
		if ( !pCheckpoint )
		{
			// word sorts before the 1st checkpoint - scan only that head area
			tChPoint.m_uLen = pSeg->m_dWordCheckpoints.Begin()->m_iOffset;
		} else
		{
			// scan from the found checkpoint up to the next one (if any)
			tChPoint.m_uOff = pCheckpoint->m_iOffset;
			if ( ( pCheckpoint + 1 )<=( &pSeg->m_dWordCheckpoints.Last() ) )
				tChPoint.m_uLen = pCheckpoint[1].m_iOffset;
		}
	}
	return tChPoint;
}
// For a prefix-wildcard keyword ("abc*"), fill tSubInfo with the literal
// prefix and return the checkpoint-bounded dictionary range that may contain
// words starting with it. The range can span several checkpoints.
static Slice_t GetPrefixLocator ( const char * sWord, bool bHasMorphology, const RtSegment_t * pSeg, SubstringInfo_t & tSubInfo )
{
	// do prefix expansion
	// remove exact form modifier, if any
	const char * sPrefix = sWord;
	if ( *sPrefix=='=' )
		sPrefix++;
	// skip leading wild-cards
	// (in case we got here on non-infix index path)
	const char * sWildcard = sPrefix;
	while ( sphIsWild ( *sPrefix ) )
	{
		sPrefix++;
		sWildcard++;
	}
	// compute non-wild-card prefix length
	int iPrefix = 0;
	for ( const char * s = sPrefix; *s && !sphIsWild ( *s ); s++ )
		iPrefix++;
	// prefix expansion should work on non-stemmed words only
	if ( bHasMorphology )
	{
		// prepend the magic marker so only non-stemmed dictionary forms match
		tSubInfo.m_sMorph[0] = MAGIC_WORD_HEAD_NONSTEMMED;
		memcpy ( tSubInfo.m_sMorph + 1, sPrefix, iPrefix );
		sPrefix = tSubInfo.m_sMorph;
		iPrefix++;
	}
	tSubInfo.m_sWildcard = sWildcard;
	tSubInfo.m_sSubstring = sPrefix;
	tSubInfo.m_iSubLen = iPrefix;
	Slice_t tChPoint;
	tChPoint.m_uLen = pSeg->m_dWords.GetLength();
	// find initial checkpoint or check words prior to 1st checkpoint
	if ( !pSeg->m_dWordCheckpoints.IsEmpty() )
	{
		const RtWordCheckpoint_t * pLast = &pSeg->m_dWordCheckpoints.Last();
		const RtWordCheckpoint_t * pCheckpoint = sphSearchCheckpointWrd( sPrefix, iPrefix, true, pSeg->m_dWordCheckpoints );
		if ( pCheckpoint )
		{
			// there could be valid data prior 1st checkpoint that should be unpacked and checked
			auto iNameLen = (int) strnlen ( pCheckpoint->m_szWord, SPH_MAX_KEYWORD_LEN );
			if ( pCheckpoint!=pSeg->m_dWordCheckpoints.Begin() || (sphDictCmp ( sPrefix, iPrefix, pCheckpoint->m_szWord, iNameLen )==0 && iPrefix==iNameLen) )
				tChPoint.m_uOff = pCheckpoint->m_iOffset;
			// find the last checkpoint that meets prefix condition ( ie might be a span of terms that splat to a couple of checkpoints )
			++pCheckpoint;
			while ( pCheckpoint<=pLast )
			{
				iNameLen = (int) strnlen ( pCheckpoint->m_szWord, SPH_MAX_KEYWORD_LEN );
				int iCmp = sphDictCmp ( sPrefix, iPrefix, pCheckpoint->m_szWord, iNameLen );
				if ( iCmp==0 && iPrefix==iNameLen )
					tChPoint.m_uOff = pCheckpoint->m_iOffset;
				if ( iCmp<0 )
					break;
				++pCheckpoint;
			}
		}
	}
	return tChPoint;
}
// For an infix/suffix wildcard keyword ("*abc*"), extract the longest literal
// fragment, look it up in the segment's infix bloom checkpoints, and emit one
// dictionary byte-range per candidate checkpoint into dPoints.
static void GetSuffixLocators ( const char * sWord, int iMaxCodepointLength, const RtSegment_t * pSeg, SubstringInfo_t & tSubInfo, CSphVector<Slice_t> & dPoints )
{
	assert ( sphIsWild ( *sWord ) );
	// find the longest substring of non-wild-cards
	const char * sMaxInfix = nullptr;
	int iMaxInfix = 0;
	int iCur = 0;
	for ( const char * s = sWord; *s; s++ )
	{
		if ( sphIsWild ( *s ) )
		{
			iCur = 0;
		} else if ( ++iCur>iMaxInfix )
		{
			sMaxInfix = s - iCur + 1;
			iMaxInfix = iCur;
		}
	}
	tSubInfo.m_sWildcard = sWord;
	tSubInfo.m_sSubstring = sMaxInfix;
	tSubInfo.m_iSubLen = iMaxInfix;
	// candidate checkpoint indexes, produced by the infix bloom filter
	CSphVector<DWORD> dInfixes;
	ExtractInfixCheckpoints ( sMaxInfix, iMaxInfix, iMaxCodepointLength, pSeg->m_dWordCheckpoints.GetLength(), pSeg->m_dInfixFilterCP, dInfixes );
	ARRAY_FOREACH ( i, dInfixes )
	{
		// each candidate spans from the previous checkpoint to the next one
		int iNext = dInfixes[i];
		iCur = iNext - 1;
		Slice_t & tChPoint = dPoints.Add();
		tChPoint.m_uOff = 0;
		tChPoint.m_uLen = pSeg->m_dWords.GetLength();
		if ( iCur>=0 )
			tChPoint.m_uOff = pSeg->m_dWordCheckpoints[iCur].m_iOffset;
		if ( iNext<pSeg->m_dWordCheckpoints.GetLength() )
			tChPoint.m_uLen = pSeg->m_dWordCheckpoints[iNext].m_iOffset;
	}
}
static void PercolateTags ( const char * szTags, CSphFixedVector<uint64_t> & dDstTags )
{
if ( !szTags || !*szTags )
return;
StrVec_t dTagStrings;
sphSplit ( dTagStrings, szTags );
if ( dTagStrings.IsEmpty() )
return;
CSphFixedVector<uint64_t> dTmpTags ( dTagStrings.GetLength() );
ARRAY_FOREACH ( i, dTagStrings )
dTmpTags[i] = sphFNV64 ( dTagStrings[i].cstr() );
dTmpTags.Sort();
int iLen = sphUniq ( dTmpTags.Begin(), dTmpTags.GetLength() );
dDstTags.CopyFrom ( dTmpTags.Slice ( 0, iLen ) );
}
static void PercolateAppendTags ( const CSphString& sTags, CSphVector<uint64_t>& dTags )
{
if ( sTags.IsEmpty() )
return;
StrVec_t dTagStrings;
sphSplit ( dTagStrings, sTags.cstr() );
if ( dTagStrings.IsEmpty() )
return;
dTags.ReserveGap ( dTagStrings.GetLength() );
for ( const auto& sTag : dTagStrings )
dTags.Add ( sphFNV64 ( sTag.cstr() ) );
}
static bool TagsMatched ( const VecTraits_T<uint64_t>& dFilter, const VecTraits_T<uint64_t>& dQueryTags )
{
auto *pFilter = dFilter.begin();
auto *pQueryTags = dQueryTags.begin();
auto *pFilterEnd = dFilter.end();
auto *pTagsEnd = dQueryTags.end();
while ( pFilter<pFilterEnd && pQueryTags<pTagsEnd )
{
if ( *pQueryTags<*pFilter )
++pQueryTags;
else if ( *pFilter<*pQueryTags )
++pFilter;
else if ( *pQueryTags==*pFilter )
return true;
}
return false;
}
//////////////////////////////////////////////////////////////////////////
// percolate index definition
// Construct a percolate index: store the document schema and build the fixed
// "match" schema (id, query, tags, filters) that query results are reported in.
PercolateIndex_c::PercolateIndex_c ( CSphString sIndexName, CSphString sPath, CSphSchema tSchema )
	: PercolateIndex_i { std::move ( sIndexName ), std::move ( sPath ) }
{
	m_tSchema = std::move ( tSchema );

	// add id column
	CSphColumnInfo tCol ( sphGetDocidName () );
	tCol.m_eAttrType = SPH_ATTR_BIGINT;
	m_tMatchSchema.AddAttr ( tCol, true );

	// fill match schema
	m_tMatchSchema.AddAttr ( CSphColumnInfo ( "query", SPH_ATTR_STRINGPTR ), true );
	m_tMatchSchema.AddAttr ( CSphColumnInfo ( "tags", SPH_ATTR_STRINGPTR ), true );
	m_tMatchSchema.AddAttr ( CSphColumnInfo ( "filters", SPH_ATTR_STRINGPTR ), true );

	// shared storage for the stored (percolate) queries
	m_pQueries = new CSphVector<StoredQuerySharedPtr_t>;
}
// Destructor: persist metadata for a fully-initialized index, release the
// lock file, and remove on-disk files if the index was dropped.
PercolateIndex_c::~PercolateIndex_c ()
{
	// tokenizer+dict present means the index was set up enough to have valid meta
	bool bValid = m_pTokenizer && m_pDict;
	if ( bValid )
		SaveMeta ( sphInterrupted() );
	SafeClose ( m_iLockFD );

	if ( m_bIndexDeleted )
	{
		// index was DROPped - clean up its files
		CSphString sFile = GetFilename ( "meta" );
		::unlink ( sFile.cstr() );
		sFile = GetFilename ( SPH_EXT_SETTINGS );
		::unlink ( sFile.cstr() );
	}
}
// Bind (and lazily set up) the given accumulator to this index; the 'true'
// flag requests the word-dict mode used by percolate accumulation.
bool PercolateIndex_c::BindAccum ( RtAccum_t * pAccExt, CSphString* pError )
{
	return PrepareAccum ( pAccExt, true, pError );
}
// Tokenize one document and add its hits into the accumulator, so a later
// Commit can match it against the stored queries. Returns false (with sError
// set) on any setup/iteration failure; bReplace and sTokenFilterOptions are
// accepted for interface compatibility but not used here.
bool PercolateIndex_c::AddDocument ( InsertDocData_c & tDoc, bool bReplace, const CSphString & sTokenFilterOptions, CSphString & sError, CSphString & sWarning, RtAccum_t * pAcc )
{
	if ( !BindAccum ( pAcc, &sError ) )
		return false;

	TokenizerRefPtr_c tTokenizer = CloneIndexingTokenizer ();
	if ( !tTokenizer )
	{
		sError = GetLastError ();
		return false;
	}

	// wrap the raw field strings into a one-document source
	CSphSource_StringVector tSrc ( tDoc.m_dFields, m_tSchema );

	if ( m_tSettings.m_bHtmlStrip &&
		!tSrc.SetStripHTML ( m_tSettings.m_sHtmlIndexAttrs.cstr(), m_tSettings.m_sHtmlRemoveElements.cstr(),
			m_tSettings.m_bIndexSP, m_tSettings.m_sZones.cstr(), sError ) )
		return false;

	// TODO: field filter \ token filter?
	tSrc.Setup ( m_tSettings, nullptr );
	tSrc.SetTokenizer ( std::move ( tTokenizer ) );
	tSrc.SetDict ( pAcc->m_pDict );
	if ( m_pFieldFilter )
		tSrc.SetFieldFilter ( m_pFieldFilter->Clone() );
	if ( !tSrc.Connect ( m_sLastError ) )
		return false;

	m_tSchema.CloneWholeMatch ( tSrc.m_tDocInfo, tDoc.m_tDoc );

	bool bEOF = false;
	if ( !tSrc.IterateStart ( sError ) || !tSrc.IterateDocument ( bEOF, sError ) )
		return false;

	// collect the document hits and stash them into the accumulator
	ISphHits * pHits = tSrc.IterateHits ( sError );
	pAcc->GrabLastWarning ( sWarning );

	pAcc->AddDocument ( pHits, tDoc, true, m_tSchema.GetRowSize(), nullptr );

	return true;
}
//////////////////////////////////////////////////////////////////////////
// percolate Qword
// Query-word reader over an RT segment for percolate matching. A single
// keyword may expand (via wildcards) to several dictionary entries, so the
// doclist is a vector of slices iterated one after another.
struct PercolateQword_t : public ISphQword
{
public:
	PercolateQword_t () = default;

	// advance to the next matching document across all doclist slices;
	// returns a match with INVALID_ROWID when exhausted
	const CSphMatch & GetNextDoc() final
	{
		m_iHits = 0;
		while ( !m_tDocReader.UnzipDoc() )
		{
			if ( m_iDoc >= m_dDoclist.GetLength() )
			{
				m_tMatch.m_tRowID = INVALID_ROWID;
				return m_tMatch;
			}
			// current slice drained - switch the reader to the next one
			SetupReader();
		}

		const auto& tDoc = *m_tDocReader;
		m_tMatch.m_tRowID = tDoc.m_tRowID;
		m_dQwordFields.Assign32 ( tDoc.m_uDocFields );
		m_uMatchHits = tDoc.m_uHits;
		// pack hit count and hit offset into one 64-bit position token
		m_iHitlistPos = (uint64_t( tDoc.m_uHits)<<32) + tDoc.m_uHit;
		m_bAllFieldsKnown = false;
		return m_tMatch;
	}

	// position the hit reader; a single-hit doc stores the hit inline (in
	// m_uNextHit) instead of going through the packed hitlist
	void SeekHitlist ( SphOffset_t uOff ) final
	{
		int iHits = (int)(uOff>>32);
		if ( iHits==1 )
		{
			m_uNextHit = DWORD(uOff);
		} else
		{
			m_uNextHit = 0;
			m_tHitReader.Seek ( m_pHits + DWORD ( uOff ), iHits );
		}
	}

	// next hit position; 0xffffffffUL marks the inline hit as consumed
	Hitpos_t GetNextHit () final
	{
		if ( !m_uNextHit )
			return Hitpos_t ( m_tHitReader.UnzipHit() );
		else if ( m_uNextHit==0xffffffffUL )
			return EMPTY_HIT;
		else
			return Hitpos_t ( std::exchange ( m_uNextHit, 0xffffffffUL ) );
	}

	// take ownership of the collected doclist slices and start reading
	bool Setup ( const RtSegment_t * pSeg, CSphVector<Slice_t> & dDoclist )
	{
		m_iDoc = 0;
		m_tDocReader.Reset();
		m_pSeg = pSeg;
		SafeAddRef ( pSeg );
		m_pHits = pSeg->m_dHits.begin();

		m_dDoclist.Set ( dDoclist.Begin(), dDoclist.GetLength() );
		dDoclist.LeakData();

		// NOTE(review): m_iDoc was just reset to 0, so this guard can never
		// fire - looks vestigial; confirm before removing
		if ( m_iDoc && m_iDoc>=m_dDoclist.GetLength() )
			return false;

		SetupReader();
		return true;
	}

private:
	// point the doc reader at doclist slice m_iDoc, then advance the index
	void SetupReader ()
	{
		RtWord_t tWord;
		tWord.m_uDoc = m_dDoclist[m_iDoc].m_uOff;
		tWord.m_uDocs = m_dDoclist[m_iDoc].m_uLen;
		m_tDocReader.Init ( m_pSeg, tWord );
		++m_iDoc;
	}

	ConstRtSegmentRefPtf_t m_pSeg;					// keeps the segment alive while reading
	CSphFixedVector<Slice_t> m_dDoclist { 0 };		// doclist slices, one per expanded dictionary entry
	CSphMatch m_tMatch;
	RtDocReader_c m_tDocReader;
	RtHitReader_c m_tHitReader;

	int m_iDoc = 0;						// next doclist slice to read
	DWORD m_uNextHit = 0;				// inline hit for single-hit docs (0 = use hitlist reader)
	const BYTE* m_pHits = nullptr;		// segment hit data base pointer
};
// How a keyword is compared against segment dictionary entries.
enum class PERCOLATE
{
	EXACT,	// no expandable wildcards: strict dictionary compare
	PREFIX,	// trailing wildcard: prefix compare, then wildcard match
	INFIX	// leading wildcard: wildcard match over candidates only
};
// Factory for query words; caller owns the returned object.
ISphQword * PercolateQwordSetup_c::QwordSpawn ( const XQKeyword_t & ) const
{
	return new PercolateQword_t();
}
// Resolve one query word against the RT segment dictionary: locate the
// dictionary byte range(s) it can live in (exact/prefix/infix), scan those
// ranges, collect the doclists of every matching entry, and hand them to the
// qword. Returns false when the word matches nothing in this segment.
bool PercolateQwordSetup_c::QwordSetup ( ISphQword * pQword ) const
{
	auto * pMyQword = (PercolateQword_t *)pQword;
	const char * sWord = pMyQword->m_sDictWord.cstr();
	int iWordLen = pMyQword->m_sDictWord.Length();
	if ( !iWordLen )
		return false;

	// fix for the case '=*term' that should count as infix
	if ( iWordLen>1 && sWord[0]==MAGIC_WORD_HEAD_NONSTEMMED && sWord[1]=='*' )
	{
		sWord++;
		iWordLen--;
	}

	// choose the comparison mode and compute candidate dictionary ranges
	SubstringInfo_t tSubInfo;
	CSphVector<Slice_t> dDictLoc;
	PERCOLATE eCmp = PERCOLATE::EXACT;
	if ( !sphHasExpandableWildcards ( sWord ) )
	{
		// no wild-cards, or just wild-cards? do not expand
		Slice_t tChPoint = GetTermLocator ( sWord, iWordLen, m_pSeg );
		dDictLoc.Add ( tChPoint );
	} else if ( !sphIsWild ( *sWord ) )
	{
		eCmp = PERCOLATE::PREFIX;
		Slice_t tChPoint = GetPrefixLocator ( sWord, m_pDict->HasMorphology(), m_pSeg, tSubInfo );
		dDictLoc.Add ( tChPoint );
	} else
	{
		eCmp = PERCOLATE::INFIX;
		GetSuffixLocators ( sWord, m_iMaxCodepointLength, m_pSeg, tSubInfo, dDictLoc );
	}

	// to skip heading magic chars ( NONSTEMMED ) in the prefix
	int iSkipMagic = 0;
	if ( eCmp==PERCOLATE::PREFIX || eCmp==PERCOLATE::INFIX )
		iSkipMagic = ( BYTE ( *tSubInfo.m_sSubstring )<0x20 );

	// utf8 conversion for sphWildcardMatch
	int dWildcard [ SPH_MAX_WORD_LEN + 1 ];
	int * pWildcard = nullptr;
	if ( ( eCmp==PERCOLATE::PREFIX || eCmp==PERCOLATE::INFIX ) && sphIsUTF8 ( tSubInfo.m_sWildcard ) && sphUTF8ToWideChar ( tSubInfo.m_sWildcard, dWildcard, SPH_MAX_WORD_LEN ) )
		pWildcard = dWildcard;

	// cases:
	// empty - check all words
	// no matches - check only words prior to 1st checkpoint
	// checkpoint found - check words at that checkpoint
	const BYTE * pWordBase = m_pSeg->m_dWords.Begin();
	CSphVector<Slice_t> dDictWords;
	ARRAY_FOREACH ( i, dDictLoc )
	{
		RtWordReader_c tReader ( m_pSeg, true, PERCOLATE_WORDS_PER_CP, m_eHitless );
		// locator
		// m_uOff - Start
		// m_uLen - End
		tReader.m_pCur = pWordBase + dDictLoc[i].m_uOff;
		tReader.m_pMax = pWordBase + dDictLoc[i].m_uLen;

		while ( tReader.UnzipWord() )
		{
			const auto* pWord = (const RtWord_t*)tReader;
			// stemmed terms do not match any kind of wild-cards
			if ( ( eCmp==PERCOLATE::PREFIX || eCmp==PERCOLATE::INFIX ) && m_pDict->HasMorphology() && pWord->m_sWord[1]!=MAGIC_WORD_HEAD_NONSTEMMED )
				continue;

			// dictionary words are length-prefixed: byte 0 is length, text follows
			int iCmp = -1;
			switch ( eCmp )
			{
			case PERCOLATE::EXACT:
				iCmp = sphDictCmpStrictly ( (const char *)pWord->m_sWord + 1, pWord->m_sWord[0], sWord, iWordLen );
				break;

			case PERCOLATE::PREFIX:
				iCmp = sphDictCmp ( (const char *)pWord->m_sWord + 1, pWord->m_sWord[0], tSubInfo.m_sSubstring, tSubInfo.m_iSubLen );
				if ( iCmp==0 )
				{
					// prefix matched - verify the full wildcard pattern
					if ( !( tSubInfo.m_iSubLen<=pWord->m_sWord[0] && sphWildcardMatch ( (const char *)pWord->m_sWord + 1 + iSkipMagic, tSubInfo.m_sWildcard, pWildcard ) ) )
						iCmp = -1;
				}
				break;

			case PERCOLATE::INFIX:
				if ( sphWildcardMatch ( (const char *)pWord->m_sWord + 1 + iSkipMagic, tSubInfo.m_sWildcard, pWildcard ) )
					iCmp = 0;
				break;

			default: break;
			}

			if ( iCmp==0 )
			{
				// entry matched - accumulate stats and remember its doclist slice
				pMyQword->m_iDocs += pWord->m_uDocs;
				pMyQword->m_iHits += pWord->m_uHits;

				Slice_t & tDictPoint = dDictWords.Add();
				tDictPoint.m_uOff = pWord->m_uDoc;
				tDictPoint.m_uLen = pWord->m_uDocs;
			}

			// the dictionary is sorted, so once past the target we can stop;
			// for EXACT the first hit is the only possible one
			if ( iCmp>0 || ( iCmp==0 && eCmp==PERCOLATE::EXACT ) )
				break;
		}
	}

	bool bWordSet = false;
	if ( dDictWords.GetLength() )
	{
		dDictWords.Sort ( bind ( &Slice_t::m_uOff ) );
		bWordSet = pMyQword->Setup ( m_pSeg, dDictWords );
	}

	return bWordSet;
}
// Spawn a scanning qword that "matches" every row of the segment (fullscan).
ISphQword * PercolateQwordSetup_c::ScanSpawn() const
{
	return new QwordScan_c ( m_pSeg->m_uRows );
}
// Dictionary facade used while matching: stems the incoming word (unless it
// is an exact-form '=' term), then resolves it through the stored query's
// private term map rather than a full index dictionary.
SphWordID_t PercolateDictProxy_c::GetWordID ( BYTE * pWord )
{
	assert ( m_pDict );
	assert ( !m_bHasMorph || m_pDictMorph );

	// apply stemmers
	if ( m_bHasMorph && pWord[0]!='=' )
		m_pDictMorph->GetWordID ( pWord );

	return const_cast<DictMap_t *>(m_pDict)->GetTerm ( pWord );
}
// Look the word up by its FNV hash; on a hit, copy the stored normalized
// keyword bytes back into sWord (callers rely on the canonical form) and
// return its word id. Returns 0 when the word is unknown to this query.
SphWordID_t DictMap_t::GetTerm ( BYTE * sWord ) const
{
	const DictTerm_t * pTerm = m_hTerms.Find ( sphFNV64 ( sWord ) );
	if ( !pTerm )
		return 0;

	memcpy ( sWord, m_dKeywords.Begin() + pTerm->m_iWordOff, pTerm->m_iWordLen );
	return pTerm->m_uWordID;
}
// Build a per-worker matching context over one segment; caller owns it.
// The last flag selects 64-bit field masks for schemas with >32 fields.
PercolateMatchContext_t * PercolateIndex_c::CreateMatchContext ( const RtSegment_t * pSeg, const SegmentReject_t & tReject )
{
	return new PercolateMatchContext_t ( pSeg, m_iMaxCodepointLength, m_pDict->HasMorphology(), GetStatelessDict ( m_pDict ), this
										 , m_tSchema, tReject, m_tSettings.m_eHitless, ( m_tSchema.GetFieldsCount()>32 ) );
}
namespace {
// full scan when no docs required
// Fullscan path when document ids are not requested: count the segment rows
// that survive the query's filter set (EarlyReject) and return the count.
int FullscanWithoutDocs ( PercolateMatchContext_t & tMatchCtx )
{
	const CSphIndex * pIndex = tMatchCtx.m_pTermSetup->m_pIndex;
	const auto * pSeg = (const RtSegment_t *) tMatchCtx.m_pCtx->m_pIndexData;

	CSphMatch tRow;
	int iMatched = 0;
	for ( DWORD uRowID = 0; uRowID<pSeg->m_uRows; ++uRowID )
	{
		tRow.m_tRowID = uRowID;
		if ( !pIndex->EarlyReject ( tMatchCtx.m_pCtx.get(), tRow ) )
			++iMatched;
	}
	return iMatched;
}
// full scan and collect docs
// Fullscan path that also collects document ids. The output layout in
// m_dDocsMatched is [count, docid, docid, ...]; the count slot is reserved
// first and patched (or popped) at the end.
// NOTE(review): docids are narrowed to int here ((int)sphGetDocID) - looks
// like a deliberate layout choice of m_dDocsMatched; confirm for 64-bit ids.
int FullScanCollectingDocs ( PercolateMatchContext_t & tMatchCtx )
{
	const CSphIndex * pIndex = tMatchCtx.m_pTermSetup->m_pIndex;
	const auto * pSeg = (const RtSegment_t *) tMatchCtx.m_pCtx->m_pIndexData;
	int iStride = tMatchCtx.m_tSchema.GetRowSize ();
	int iCountIdx = tMatchCtx.m_dDocsMatched.GetLength ();
	tMatchCtx.m_dDocsMatched.Add ( 0 ); // placeholder for counter
	FakeRL_t _ ( pSeg->m_tLock ); // that is s-t by design, don't need real lock
	const CSphRowitem * pRow = pSeg->m_dRows.Begin ();
	CSphMatch tDoc;
	int iMatchesCount = 0;
	for ( DWORD i = 0; i<pSeg->m_uRows; ++i )
	{
		tDoc.m_tRowID = i;
		if ( !pIndex->EarlyReject ( tMatchCtx.m_pCtx.get(), tDoc ) )
		{
			tMatchCtx.m_dDocsMatched.Add ( (int)sphGetDocID(pRow) );
			++iMatchesCount;
		}
		pRow += iStride;
	}

	if ( iMatchesCount ) // write counter of docs into placeholder
		tMatchCtx.m_dDocsMatched[iCountIdx] = iMatchesCount;
	else
		tMatchCtx.m_dDocsMatched.Resize ( iCountIdx ); // pop's up reserved but not used matched counter
	return iMatchesCount;
}
// full-text search when no docs required
// Full-text path when document ids are not requested: run the stored query's
// ranker over the segment and sum up the number of matches it yields.
int FtMatchingWithoutDocs ( const StoredQuery_t * pStored, PercolateMatchContext_t & tMatchCtx )
{
	tMatchCtx.m_pDictMap->SetMap ( pStored->m_hDict ); // set terms dictionary
	CSphQueryResultMeta tTmpMeta;
	std::unique_ptr<ISphRanker> pRanker = sphCreateRanker ( *pStored->m_pXQ, tMatchCtx.m_tDummyQuery,
		tTmpMeta, *tMatchCtx.m_pTermSetup, *tMatchCtx.m_pCtx, tMatchCtx.m_tSchema );

	if ( !pRanker )
		return 0;

	// drain the ranker; only the match count matters here
	int iMatchesCount = 0;
	for ( auto iMatches = pRanker->GetMatches (); iMatches!=0; iMatches = pRanker->GetMatches ())
		iMatchesCount += iMatches;
	return iMatchesCount;
}
// full-text search and collect docs
// Full-text path that also collects document ids. Output layout in
// m_dDocsMatched is [count, docid, ...], with the count slot reserved first
// and patched (or popped) at the end, same as FullScanCollectingDocs.
// NOTE(review): docids narrowed to int - same caveat as the fullscan path.
int FtMatchingCollectingDocs ( const StoredQuery_t * pStored, PercolateMatchContext_t & tMatchCtx )
{
	tMatchCtx.m_pDictMap->SetMap ( pStored->m_hDict ); // set terms dictionary
	CSphQueryResultMeta tTmpMeta;
	std::unique_ptr<ISphRanker> pRanker = sphCreateRanker ( *pStored->m_pXQ, tMatchCtx.m_tDummyQuery,
		tTmpMeta, *tMatchCtx.m_pTermSetup, *tMatchCtx.m_pCtx, tMatchCtx.m_tSchema );

	if ( !pRanker )
		return 0;

	int iCountIdx = tMatchCtx.m_dDocsMatched.GetLength();
	int iMatchesCount = 0;

	// reserve space for matched docs counter
	tMatchCtx.m_dDocsMatched.Add ( iMatchesCount );
	const auto * pSeg = (const RtSegment_t *)tMatchCtx.m_pCtx->m_pIndexData;

	// pull ranker matches batch by batch, translating row ids into docids
	const CSphMatch * pMatch = pRanker->GetMatchesBuffer();
	for ( auto iMatches = pRanker->GetMatches (); iMatches!=0; iMatches = pRanker->GetMatches ())
	{
		int * pDocids = tMatchCtx.m_dDocsMatched.AddN ( iMatches );
		for ( int i = 0; i<iMatches; ++i )
			pDocids[i] = (int)sphGetDocID ( pSeg->GetDocinfoByRowID ( pMatch[i].m_tRowID ) );
		iMatchesCount += iMatches;
	}

	if ( iMatchesCount ) // write counter of docs into placeholder
		tMatchCtx.m_dDocsMatched[iCountIdx] = iMatchesCount;
	else
		tMatchCtx.m_dDocsMatched.Resize ( iCountIdx ); // pop's up reserved but not used matched counter
	return iMatchesCount;
}
// percolate matching
// Match one stored query against the current segment: try the cheap early
// reject first, then set up the query's filters and run the appropriate
// fullscan/full-text, with/without-docs matching routine. On a match, a
// result descriptor is appended to the context.
void MatchingWorkAction ( const StoredQuery_t * pStored, PercolateMatchContext_t & tMatchCtx )
{
	int64_t tmQueryStart = ( tMatchCtx.m_bVerbose ? sphMicroTimer() : 0 );
	tMatchCtx.m_iOnlyTerms += ( pStored->m_bOnlyTerms ? 1 : 0 );

	// cheap term/bloom based rejection (fullscan queries can not be rejected)
	if ( !pStored->IsFullscan() && tMatchCtx.m_tReject.Filter ( pStored, tMatchCtx.m_bUtf8 ) )
		return;

	const auto * pSeg = (const RtSegment_t *)tMatchCtx.m_pCtx->m_pIndexData;
	FakeRL_t _ ( pSeg->m_tLock ); // that is s-t by design, don't need real lock
	const BYTE * pBlobs = pSeg->m_dBlobs.Begin();
	++tMatchCtx.m_iEarlyPassed;
	// filters are per-query; drop them whatever way we leave this function
	AT_SCOPE_EXIT ( [&tMatchCtx]() { tMatchCtx.m_pCtx->ResetFilters(); } );

	CSphString sError;
	CSphString sWarning;

	// setup filters
	CreateFilterContext_t tFlx;
	tFlx.m_pFilters = &pStored->m_dFilters;
	tFlx.m_pFilterTree = &pStored->m_dFilterTree;
	tFlx.m_pMatchSchema = &tMatchCtx.m_tSchema;
	tFlx.m_pBlobPool = pBlobs;

	bool bRes = tMatchCtx.m_pCtx->CreateFilters ( tFlx, sError, sWarning );
	tMatchCtx.m_dMsg.Err ( sError );
	tMatchCtx.m_dMsg.Warn ( sWarning );

	if (!bRes )
	{
		++tMatchCtx.m_iQueriesFailed;
		return;
	}

	// pick one of the four matching strategies
	int iMatchesCount;
	if ( tMatchCtx.m_bGetDocs )
		iMatchesCount = pStored->IsFullscan ()
				? FullScanCollectingDocs ( tMatchCtx )
				: FtMatchingCollectingDocs ( pStored, tMatchCtx );
	else
		iMatchesCount = pStored->IsFullscan ()
				? FullscanWithoutDocs ( tMatchCtx )
				: FtMatchingWithoutDocs ( pStored, tMatchCtx );

	if ( !iMatchesCount )
		return;

	// collect matched pq, if any
	tMatchCtx.m_iDocsMatched += iMatchesCount;
	PercolateQueryDesc & tDesc = tMatchCtx.m_dQueryMatched.Add();
	tDesc.m_iQUID = pStored->m_iQUID;
	if ( tMatchCtx.m_bGetQuery )
	{
		tDesc.m_sQuery = pStored->m_sQuery;
		tDesc.m_sTags = pStored->m_sTags;
		tDesc.m_bQL = pStored->m_bQL;

		if ( tMatchCtx.m_bGetFilters && pStored->m_dFilters.GetLength() )
		{
			// render the filters back into a query-language string
			StringBuilder_c sFilters;
			FormatFiltersQL ( pStored->m_dFilters, pStored->m_dFilterTree, sFilters );
			sFilters.MoveTo ( tDesc.m_sFilters );
		}
	}

	if ( tMatchCtx.m_bVerbose )
		tMatchCtx.m_dDt.Add ( (int)( sphMicroTimer() - tmQueryStart ) );
}
// Stack-aware wrapper around MatchingWorkAction: run inline if the current
// stack is deep enough, switch to a larger coroutine stack when allowed, or
// fail the query when even the maximum stack can not fit it.
void MatchingWork ( const StoredQuery_t* pStored, PercolateMatchContext_t& tMatchCtx )
{
	// track the worst-case base stack usage observed so far.
	// NOTE(review): this is a plain write to a static from worker context -
	// looks unsynchronized; confirm it is only used as a heuristic
	int iStackBase = Threads::GetStackUsed();
	if ( StoredQuery_t::m_iStackBaseRequired < iStackBase )
		StoredQuery_t::m_iStackBaseRequired = iStackBase;

	int iQueryStack = Threads::GetStackUsed() + pStored->m_iStackRequired;
	auto iMyStackSize = Threads::MyStackSize();
	if ( iMyStackSize >= iQueryStack )
		return MatchingWorkAction ( pStored, tMatchCtx );

	// current stack is too small - continue on a dedicated coroutine stack
	if ( tMatchCtx.m_iMaxStackSize >= iQueryStack )
		return Threads::Coro::Continue ( iQueryStack, [&] {
			MatchingWorkAction ( pStored, tMatchCtx );
		});

	tMatchCtx.m_dMsg.Err ( "PQ requires %d bytes of stack (%d + %d), but only %d available", iQueryStack, pStored->m_iStackRequired, Threads::GetStackUsed(), (int)tMatchCtx.m_iMaxStackSize);
	++tMatchCtx.m_iQueriesFailed;
}
} // static namespace
// Member-wise exchange with another descriptor; string members swap their
// buffers, so no allocations or copies are involved.
void PercolateQueryDesc::Swap ( PercolateQueryDesc & tOther )
{
	m_sQuery.Swap ( tOther.m_sQuery );
	m_sTags.Swap ( tOther.m_sTags );
	m_sFilters.Swap ( tOther.m_sFilters );
	::Swap ( m_iQUID, tOther.m_iQUID );
	::Swap ( m_bQL, tOther.m_bQL );
}
// Iterator over one worker's match set, used to k-way merge several sets by
// ascending query id (QUID). Keeps an index permutation sorted by QUID and,
// when docids were collected, the offset of each query's doc block inside the
// flat [count, docid, ...] layout of m_dDocsMatched.
struct PQMergeIterator_t
{
	PQMatchContextResult_t * m_pMatch = nullptr;
	int m_iIdx = 0;		// current position in m_dElems
	int m_iElems = 0;	// total matched queries in this set

	CSphFixedVector<int> m_dElems;			// permutation of match indexes, sorted by QUID
	CSphFixedVector<int> m_dDocOffsets {0};	// per-match offset into m_dDocsMatched (if docs collected)

	explicit PQMergeIterator_t ( PQMatchContextResult_t * pMatch )
		: m_pMatch ( pMatch )
		, m_iElems ( pMatch->m_dQueryMatched.GetLength () )
		, m_dElems ( pMatch->m_dQueryMatched.GetLength () )
	{
		for ( int i=0; i<m_iElems; ++i )
			m_dElems[i] = i;
		m_dElems.Sort ( Lesser ( [this] ( int a, int b )
			{ return m_pMatch->m_dQueryMatched[a].m_iQUID < m_pMatch->m_dQueryMatched[b].m_iQUID; } ) );

		if ( pMatch->m_dDocsMatched.IsEmpty() )
			return;

		// walk the [count, docid...] blocks once to precompute their offsets
		m_dDocOffsets.Reset ( pMatch->m_dQueryMatched.GetLength () );
		int iOffset = 0;
		for ( int i = 0; i < m_iElems; ++i )
		{
			m_dDocOffsets[i] = iOffset;
			iOffset += 1 + pMatch->m_dDocsMatched[iOffset];
		}
	}

	inline int CurElem() const { return m_dElems[m_iIdx]; }
	inline PercolateQueryDesc& CurDesc() const { return m_pMatch->m_dQueryMatched[CurElem()];}
	inline int CurDt () const { return m_pMatch->m_dDt[CurElem ()]; }
	inline int* CurDocs () const { return &m_pMatch->m_dDocsMatched[m_dDocOffsets[CurElem ()]]; }

	// heap ordering: the iterator whose current QUID is smallest goes first
	static inline bool IsLess ( PQMergeIterator_t *a, PQMergeIterator_t *b )
	{
		return a->CurDesc().m_iQUID<b->CurDesc().m_iQUID;
	}
};
// merge matches from one or many contexts into one result
// Merge per-worker match sets into one result, ordered by query id. Uses a
// min-heap of PQMergeIterator_t for the general case and a direct copy when
// only one worker produced matches. Also aggregates counters/messages and,
// when requested, per-query timings and matched docid blocks.
void PercolateMergeResults ( const VecTraits_T<PQMatchContextResult_t *> & dMatches, PercolateMatchResult_t & tRes )
{
	if ( dMatches.IsEmpty() )
		return;

	int iGotQueries = 0;
	int iGotDocs = 0;

	tRes.m_iEarlyOutQueries = tRes.m_iTotalQueries;

	RawVector_T<PQMergeIterator_t> dIterators;
	CSphQueue<PQMergeIterator_t*, PQMergeIterator_t> qMatches ( dMatches.GetLength () );
	dIterators.Reserve ( dMatches.GetLength() );

	// aggregate counters and seed the heap with non-empty result sets
	for ( PQMatchContextResult_t * pMatch : dMatches )
	{
		tRes.m_iQueriesFailed += pMatch->m_iQueriesFailed;
		tRes.m_sMessages.AddStringsFrom ( pMatch->m_dMsg );
		if ( pMatch->m_dQueryMatched.IsEmpty() )
			continue;

		dIterators.Emplace_back ( pMatch );
		qMatches.Push ( &dIterators.Last () );
		iGotQueries += pMatch->m_dQueryMatched.GetLength();
		iGotDocs += pMatch->m_iDocsMatched;
		tRes.m_iEarlyOutQueries -= pMatch->m_iEarlyPassed;
		tRes.m_iOnlyTerms += pMatch->m_iOnlyTerms;
	}

	tRes.m_iQueriesMatched = iGotQueries;
	tRes.m_iDocsMatched = iGotDocs;
	iGotDocs += iGotQueries; // in addition to docs, the num of them is written per every query

	if ( !iGotQueries )
		return;

	// pre-size the flat output arrays and set up the write cursors
	tRes.m_dQueryDesc.Reset ( iGotQueries );
	PercolateQueryDesc * pDst = tRes.m_dQueryDesc.Begin ();

	int * pDt = nullptr;
	if ( tRes.m_bVerbose )
	{
		tRes.m_dQueryDT.Reset ( iGotQueries );
		pDt = tRes.m_dQueryDT.begin ();
		assert ( pDt );
	}

	int * pDocs = nullptr;
	if ( tRes.m_bGetDocs )
	{
		tRes.m_dDocs.Reset ( iGotDocs );
		pDocs = tRes.m_dDocs.begin();
		assert ( pDocs );
	}

	if ( qMatches.GetLength ()==1 ) // fastpath, only 1 essential result set
	{
		const auto & tIt = *qMatches.Root();
		assert ( tIt.m_iElems==iGotQueries );
		auto* pMatch = tIt.m_pMatch;
		for ( int iIdx : tIt.m_dElems )
		{
			pDst->Swap ( pMatch->m_dQueryMatched[iIdx] );
			++pDst;
			if ( pDt )
				*pDt++ = pMatch->m_dDt[iIdx];
			if ( pDocs )
			{
				// copy the whole [count, docid...] block of this query
				auto iDocOff = tIt.m_dDocOffsets[iIdx];
				int iDocBlobSize = pMatch->m_dDocsMatched[iDocOff]+1;
				memcpy ( pDocs, &pMatch->m_dDocsMatched[iDocOff], sizeof ( int ) * iDocBlobSize );
				pDocs += iDocBlobSize;
			}
		}
		return;
	}

	// general case: k-way heap merge by QUID
	PQMergeIterator_t* pMin = qMatches.Root ();
	qMatches.Pop ();
	assert ( qMatches.GetLength()>=0 ); // since case of only 1 resultset we already processed.
	while (true) {
		auto& tMin = *pMin;
		pDst->Swap ( tMin.CurDesc () );
		++pDst;

		if ( tRes.m_bVerbose )
			*pDt++ = tMin.CurDt();

		// docs copy
		if ( tRes.m_bGetDocs )
		{
			auto *pMinDocs = tMin.CurDocs ();
			int iDocBlobSize = *pMinDocs + 1;
			memcpy ( pDocs, pMinDocs, sizeof ( int ) * iDocBlobSize );
			pDocs += iDocBlobSize;
		}

		++tMin.m_iIdx;
		if ( tMin.m_iIdx<tMin.m_iElems )
		{
			// if current root is better - change the head.
			if ( qMatches.GetLength () && !PQMergeIterator_t::IsLess ( pMin, qMatches.Root() ) )
				qMatches.Push ( pMin );
			else
				continue; // keep draining the same iterator
		}

		if ( !qMatches.GetLength () )
			break;

		pMin = qMatches.Root();
		qMatches.Pop();
	}
}
// adaptor from vec of PercolateMatchContext_t* to PQMatchContextResult_t*
inline void PercolateMergeResults ( const VecTraits_T<PercolateMatchContext_t *> &dMatches, PercolateMatchResult_t &tRes )
{
auto * pMatches = (PQMatchContextResult_t **) dMatches.begin ();
auto iMatches = dMatches.GetLength ();
PercolateMergeResults ( VecTraits_T<PQMatchContextResult_t *> ( pMatches, iMatches ), tRes );
}
// Per-worker wrapper holding a freshly created match context plus the shared
// inputs (index, segment, reject data, result flags). Used as the "reference"
// context for ClonableCtx_T; clones are created via PqMatchContextClone_t.
struct PqMatchContextRef_t
{
	PercolateMatchContext_t * m_pMatchCtx;
	PercolateIndex_c * m_pIndex;
	const RtSegment_t * m_pSeg;
	const SegmentReject_t & m_tReject;
	const PercolateMatchResult_t & m_tRes;

	PqMatchContextRef_t ( PercolateIndex_c * pIndex, const RtSegment_t * pSeg,
			const SegmentReject_t & tReject, const PercolateMatchResult_t& tRes )
		: m_pIndex ( pIndex ), m_pSeg ( pSeg ), m_tReject ( tReject ), m_tRes ( tRes )
	{
		m_pMatchCtx = pIndex->CreateMatchContext( m_pSeg, m_tReject );
		// propagate the result-shaping flags from the request
		m_pMatchCtx->m_bGetDocs = tRes.m_bGetDocs;
		m_pMatchCtx->m_bGetQuery = tRes.m_bGetQuery;
		m_pMatchCtx->m_bGetFilters = tRes.m_bGetFilters;
		m_pMatchCtx->m_bVerbose = tRes.m_bVerbose;
	}

	inline static bool IsClonable ()
	{
		return true;
	}
};
// Per-thread clone of the parent context: builds its own PercolateMatchContext_t
// over the same index/segment/reject/result data as the parent.
struct PqMatchContextClone_t : public PqMatchContextRef_t, ISphNoncopyable
{
	explicit PqMatchContextClone_t ( const PqMatchContextRef_t& dParent )
		: PqMatchContextRef_t ( dParent.m_pIndex, dParent.m_pSeg, dParent.m_tReject, dParent.m_tRes )
	{}
};
// display progress of pq execution (rendered into task info, see DEFINE_RENDER below)
struct PQInfo_t : public TaskInfo_t
{
	DECLARE_RENDER( PQInfo_t );
	int m_iTotal = 0;	// total number of stored queries to match
	int m_iCurrent = 0;	// index of the query currently being matched
};
// Render PQ matching progress into the task-info chain/description.
DEFINE_RENDER( PQInfo_t )
{
	auto & tInfo = *(const PQInfo_t *) pSrc;
	dDst.m_sChain << "PQ ";
	if ( tInfo.m_iTotal )
		dDst.m_sDescription.Sprintf ( "%d%% of %d:", tInfo.m_iCurrent * 100 / tInfo.m_iTotal, tInfo.m_iTotal );
	else
		// fixed: a literal percent sign must be escaped as '%%' in the format
		// string (the branch above already does so); "100% of %d:" made the
		// formatter misparse the "% " sequence
		dDst.m_sDescription.Sprintf ( "100%% of %d:", tInfo.m_iTotal );
}
// Match the documents of pSeg against all stored percolate queries, possibly
// in parallel (one cloned context per worker), then merge the per-thread
// results into tRes and free the worker contexts.
void PercolateIndex_c::DoMatchDocuments ( const RtSegment_t * pSeg, PercolateMatchResult_t & tRes )
{
	// reject need bloom filter for either infix or prefix
	auto tReject = SegmentGetRejects (
		pSeg, ( m_tSettings.m_iMinInfixLen>0 || m_tSettings.GetMinPrefixLen ( m_pDict->GetSettings().m_bWordDict )>0 ), m_iMaxCodepointLength>1, m_tSettings.m_eHitless );
	auto dStored = GetStored();
	auto iJobs = dStored.GetLength ();
	tRes.m_iTotalQueries = iJobs;
	if ( !iJobs )
		return;
	// the context: one parent + per-thread clones, merged unordered
	ClonableCtx_T<PqMatchContextRef_t, PqMatchContextClone_t, Threads::ECONTEXT::UNORDERED> dCtx { this, pSeg, tReject, tRes };
	auto pDispatcher = Dispatcher::Make ( iJobs, 0, GetEffectiveBaseDispatcherTemplate(), dCtx.IsSingle() );
	dCtx.LimitConcurrency ( pDispatcher->GetConcurrency() );
	if ( tRes.m_bVerbose )
		tRes.m_tmSetup = sphMicroTimer ()+tRes.m_tmSetup;	// m_tmSetup held the negated start mark; finalize it
	Coro::ExecuteN ( dCtx.Concurrency ( iJobs ), [&]
	{
		auto pSource = pDispatcher->MakeSource();
		int iJob = -1; // make it consumed
		if ( !pSource->FetchTask ( iJob ) )
		{
			sphLogDebug ( "Early finish parallel DoMatchDocuments because of empty queue" );
			return; // already nothing to do, early finish.
		}
		auto pInfo = PublishTaskInfo ( new PQInfo_t );
		pInfo->m_iTotal = iJobs;
		auto tJobContext = dCtx.CloneNewContext();
		sphLogDebug ( "DoMatchDocuments cloned context %d", tJobContext.second );
		auto& tCtx = tJobContext.first;
		Threads::Coro::SetThrottlingPeriodMS ( session::GetThrottlingPeriodMS() );
		while (true)
		{
			sphLogDebugv ( "DoMatchDocuments %d, iJob: %d", tJobContext.second, iJob );
			pInfo->m_iCurrent = iJob;
			// run one stored query against the segment using this worker's context
			MatchingWork ( dStored[iJob], *tCtx.m_pMatchCtx );
			iJob = -1; // mark it consumed
			if ( !pSource->FetchTask ( iJob ) )
				return; // all is done
			// yield and reschedule every quant of time. It gives work to other tasks
			Threads::Coro::ThrottleAndKeepCrashQuery ();
		}
	});
	sphLogDebug ( "DoMatchDocuments processed in %d thread(s)", dCtx.NumWorked() );
	// collect and merge result set
	CSphVector<PercolateMatchContext_t *> dResults;
	dCtx.ForAll ( [&dResults] ( const PqMatchContextRef_t& tCtx ) { dResults.Add ( tCtx.m_pMatchCtx ); }, true );
	// merge result set
	PercolateMergeResults ( dResults, tRes );
	// worker contexts are owned here; free them after the merge
	dResults.Apply ( [] ( PercolateMatchContext_t *& pCtx ) { SafeDelete ( pCtx ); } );
}
// Entry point of percolate matching: builds a temporary RT segment from the
// accumulated documents and matches it against the stored queries.
// Returns false only when the accumulator cannot be bound to this index.
bool PercolateIndex_c::MatchDocuments ( RtAccum_t * pAcc, PercolateMatchResult_t & tRes )
{
	MEMORY ( MEM_INDEX_RT );
	int64_t tmStart = sphMicroTimer();
	if ( tRes.m_bVerbose )
		tRes.m_tmSetup = -tmStart;	// negative start mark; finalized inside DoMatchDocuments
	m_sLastWarning = "";
	if ( !BindAccum ( pAcc ) )
		return false;
	// empty txn or no queries just ignore
	if ( !pAcc->m_uAccumDocs || GetStored ().IsEmpty() )
	{
		pAcc->Cleanup ();
		return true;
	}
	pAcc->Sort();
	CSphString sError;
	// build a throwaway segment out of the accumulated docs
	RtSegment_t * pSeg = CreateSegment ( pAcc, PERCOLATE_WORDS_PER_CP, m_tSettings.m_eHitless, m_dHitlessWords, sError );
	assert ( !pSeg || pSeg->m_uRows>0 );
	assert ( !pSeg || pSeg->m_tAliveRows>0 );
	BuildSegmentInfixes ( pSeg, m_pDict->HasMorphology(), true, m_tSettings.m_iMinInfixLen,
		PERCOLATE_WORDS_PER_CP, ( m_iMaxCodepointLength>1 ), m_tSettings.m_eHitless );
	DoMatchDocuments ( pSeg, tRes );
	SafeRelease ( pSeg );
	// done; cleanup accum
	pAcc->Cleanup ();
	int64_t tmEnd = sphMicroTimer();
	tRes.m_tmTotal = tmEnd - tmStart;
	if ( tRes.m_iQueriesFailed )
		tRes.m_sMessages.Warn ( "%d queries failed", tRes.m_iQueriesFailed );
	return true;
}
// Discard the accumulated (uncommitted) changes of pAcc, if it belongs to us.
void PercolateIndex_c::RollBack ( RtAccum_t * pAcc )
{
	assert ( g_bRTChangesAllowed );
	if ( !BindAccum ( pAcc ) )
		return;
	pAcc->Cleanup();
}
// Early-reject hook: returns true when the match fails the context filter.
// Without a filter nothing can be rejected early.
bool PercolateIndex_c::EarlyReject ( CSphQueryContext * pCtx, CSphMatch & tMatch ) const
{
	if ( !pCtx->m_pFilter )
		return false;

	// attach the static row so the filter can read attribute values
	auto * pSeg = (const RtSegment_t *) pCtx->m_pIndexData;
	tMatch.m_pStatic = pSeg->GetDocinfoByRowID ( tMatch.m_tRowID );

	// reject exactly when the filter does NOT pass
	return !pCtx->m_pFilter->Eval ( tMatch );
}
// Fill index status: disk footprint of meta/ram files, TIDs, RAM usage of the
// stored queries, per-query stack requirements and lock count.
void PercolateIndex_c::GetStatus ( CSphIndexStatus * pRes ) const
{
	assert ( pRes );
	if (!pRes)
		return;
	CSphString sError;
	// sum up the on-disk size of the meta and ram files
	for ( const char * szExt : { "meta", "ram" } )
	{
		CSphAutofile fdRT ( GetFilename ( szExt ), SPH_O_READ, sError );
		int64_t iFileSize = fdRT.GetSize ();
		if ( iFileSize>0 )
			pRes->m_iDiskUse += iFileSize; // that uses disk, but not occupies
	}
	pRes->m_iTID = m_iTID;
	pRes->m_iSavedTID = m_iSavedTID;
	int64_t iRamUse = 0;
	int iMaxStack = 0;
	{
		// shared lock while walking the stored query set
		ScRL_t rLock { m_tLock };
		iRamUse = m_hQueries.GetLengthBytes();
		iRamUse += m_dHitlessWords.GetLengthBytes64() + m_dLoadedQueries.GetLengthBytes64();
		iRamUse += m_pQueries->GetLengthBytes64 ();
		for ( auto & pItem : *m_pQueries )
		{
			// track the largest per-query stack requirement while we are here
			iMaxStack = Max ( iMaxStack, pItem->m_iStackRequired );
			iRamUse += sizeof ( StoredQuery_t ) + sizeof ( XQQuery_t )
				+ pItem->m_dRejectTerms.GetLengthBytes64()
				+ pItem->m_dRejectWilds.GetLengthBytes64()
				+ pItem->m_dTags.GetLengthBytes64 ()
				+ pItem->m_dFilterTree.GetLengthBytes64 ()
				+ pItem->m_dFilters.GetLengthBytes64()
				+ pItem->m_dSuffixes.GetLengthBytes()
				+ pItem->m_sTags.Length()
				+ pItem->m_sQuery.Length();
			for ( const auto & sSuffix : pItem->m_dSuffixes )
				iRamUse += sSuffix.Length();
		}
	}
	pRes->m_iRamUse = iRamUse;
	pRes->m_iStackNeed = iMaxStack;
	pRes->m_iStackBase = StoredQuery_t::m_iStackBaseRequired;
	pRes->m_iLockCount = GetNumOfLocks();
}
// Shrinks over-allocated vectors inside a parsed XQ tree: every node whose
// m_dWords/m_dChildren vector has spare capacity (length != limit) gets its
// payload re-allocated into exact-sized storage, reducing fragmentation.
class XQTreeCompressor_t
{
	CSphVector<XQNode_t *> m_dWords;	// nodes whose keyword vector has spare capacity
	CSphVector<XQNode_t *> m_dChildren;	// nodes whose children vector has spare capacity

	// recursively collect nodes with wasted capacity in either vector
	void WalkNodes ( XQNode_t * pNode )
	{
		if ( !pNode )
			return;
		if ( pNode->m_dWords.GetLength() && pNode->m_dWords.GetLength()!=pNode->m_dWords.GetLimit() )
			m_dWords.Add ( pNode );
		if ( pNode->m_dChildren.GetLength() && pNode->m_dChildren.GetLength()!=pNode->m_dChildren.GetLimit() )
			m_dChildren.Add ( pNode );
		for ( auto & tChild : pNode->m_dChildren )
			WalkNodes ( tChild );
	}

	// re-allocate the collected vectors into exact-sized buffers
	void Copy ()
	{
		// collect all old vectors then free them at once
		CSphFixedVector< CSphVector<XQKeyword_t> > dWords2Free ( m_dWords.GetLength() );
		CSphFixedVector< CSphVector<XQNode_t *> > dChildren2Free ( m_dChildren.GetLength() );
		for ( int i=0; i<m_dWords.GetLength(); i++ )
		{
			auto & dSrcWords = m_dWords[i]->m_dWords;
			int iLen = dSrcWords.GetLength();
			CSphFixedVector<XQKeyword_t> dDstWords ( iLen );
			dDstWords.CopyFrom ( dSrcWords );
			dWords2Free[i].SwapData ( dSrcWords ); // remove all collected vectors m_pData on exit
			dSrcWords.AdoptData ( dDstWords.LeakData(), iLen, iLen );
		}
		for ( int i=0; i<m_dChildren.GetLength(); i++ )
		{
			auto & dSrcChild = m_dChildren[i]->m_dChildren;
			int iLen = dSrcChild.GetLength();
			CSphFixedVector<XQNode_t *> dDstChildren ( iLen );
			dDstChildren.CopyFrom ( dSrcChild );
			dSrcChild.Resize ( 0 ); // XQNode_t pointers moved into the new fixed-vector
			dChildren2Free[i].SwapData ( dSrcChild ); // remove all collected vectors m_pData on exit
			dSrcChild.AdoptData ( dDstChildren.LeakData(), iLen, iLen );
		}
	}

public:
	// entry point: walk the tree, then compact everything that was collected
	void DoWork ( XQNode_t * pNode )
	{
		WalkNodes ( pNode );
		Copy();
	}
};
// Create (compile) a stored percolate query from raw args: verify the QUID can
// be added, set up query tokenizer/dict clones, then forward to the overload
// that takes them. Returns nullptr and sets sError on failure.
std::unique_ptr<StoredQuery_i> PercolateIndex_c::CreateQuery ( PercolateQueryArgs_t & tArgs, CSphString & sError )
{
	{
		// duplicate-id check / QUID generation only needs a shared (read) lock
		ScRL_t tLockHash { m_tLock };
		if ( !CanBeAdded ( tArgs, sError ))
			return nullptr;
	}
	bool bWordDict = m_pDict->GetSettings().m_bWordDict;
	TokenizerRefPtr_c pTokenizer = sphCloneAndSetupQueryTokenizer ( m_pTokenizer, IsStarDict ( bWordDict ), m_tSettings.m_bIndexExactWords, false );
	DictRefPtr_c pDict = GetStatelessDict ( m_pDict );
	if ( IsStarDict ( bWordDict ) )
		SetupStarDictV8 ( pDict );
	if ( m_tSettings.m_bIndexExactWords )
		SetupExactDict ( pDict );
	if ( tArgs.m_bQL )
		return CreateQuery ( tArgs, pTokenizer, pDict, sError );
	// non-QL (JSON) queries get a tokenizer clone set up with the json flag
	TokenizerRefPtr_c pTokenizerJson = sphCloneAndSetupQueryTokenizer ( m_pTokenizer, IsStarDict ( bWordDict ), m_tSettings.m_bIndexExactWords, true );
	return CreateQuery ( tArgs, pTokenizerJson, pDict, sError );
}
// default factory: always builds the plain (fulltext) query parser,
// ignoring the json/QL flag
static std::unique_ptr<QueryParser_i> CreatePlainQueryparser ( bool )
{
	return sphCreatePlainQueryParser();
}

// pluggable query parser factory; can be overridden via SetPercolateQueryParserFactory()
static CreateQueryParser_fn * g_pCreateQueryParser = CreatePlainQueryparser;

// install a custom query parser factory (replaces the plain default)
void SetPercolateQueryParserFactory ( CreateQueryParser_fn * pCall )
{
	g_pCreateQueryParser = pCall;
}
// Recursively mark every keyword containing expandable wildcards as expanded,
// so the ranker treats it accordingly.
static void FixExpandedNode ( XQNode_t * pNode )
{
	assert ( pNode );

	for ( XQKeyword_t & tKw : pNode->m_dWords )
	{
		if ( !sphHasExpandableWildcards ( tKw.m_sWord.cstr() ) )
			continue;
		tKw.m_bExpanded = true;
		// this pointer is not owned by XQKeyword_t and will NOT be deleted;
		// it only needs to be non-null so the ranker creates an ExtPayload_c
		tKw.m_pPayload = (void *)1;
	}

	for ( XQNode_t * pChild : pNode->m_dChildren )
		FixExpandedNode ( pChild );
}
// Run the tree-fix-only pass of wildcard expansion over the query tree and
// then mark the affected keywords (see FixExpandedNode). Returns the
// (possibly replaced) tree root.
static XQNode_t * FixExpanded ( XQNode_t * pNode, int iMinPrefix, int iMinInfix, bool bExactForm )
{
	ExpansionContext_t tExpansion;
	tExpansion.m_bOnlyTreeFix = true;		// no real dictionary expansion here
	tExpansion.m_bHasExactForms = bExactForm;
	tExpansion.m_iMinInfixLen = iMinInfix;
	tExpansion.m_iMinPrefixLen = iMinPrefix;

	XQNode_t * pFixed = sphExpandXQNode ( pNode, tExpansion );
	FixExpandedNode ( pFixed );
	return pFixed;
}
// Check whether a query with the given args may be stored. An explicit QUID is
// accepted unless it already exists and replacing is not allowed; without an
// explicit QUID a fresh unique one is generated into tArgs.
bool PercolateIndex_c::CanBeAdded ( PercolateQueryArgs_t& tArgs, CSphString& sError ) const REQUIRES_SHARED ( m_tLock )
{
	if ( tArgs.m_iQUID )
	{
		// an existing id is a conflict only when replace is forbidden
		if ( !tArgs.m_bReplace && m_hQueries.Find ( tArgs.m_iQUID ) )
		{
			sError.SetSprintf ( "duplicate id '" INT64_FMT "'", tArgs.m_iQUID );
			return false;
		}
		return true;
	}

	// no id provided - generate one that is not yet taken
	int64_t iQUID;
	while ( true )
	{
		iQUID = UidShort ();
		if ( !m_hQueries.Find ( iQUID ) )
			break;
	}
	tArgs.m_iQUID = iQUID;
	return true;
}
// Compile a percolate query into a StoredQuery_t: run field filters, parse the
// fulltext expression, apply keyword expansion / AOT / wildcard fixes, compact
// the tree, then extract reject terms and copy the attribute filters.
// Returns nullptr and sets sError on parse failure.
std::unique_ptr<StoredQuery_i> PercolateIndex_c::CreateQuery ( PercolateQueryArgs_t & tArgs, const TokenizerRefPtr_c& pTokenizer, const DictRefPtr_c& pDict, CSphString & sError )
{
	const char * sQuery = tArgs.m_sQuery;
	CSphVector<BYTE> dFiltered;
	// run the field filter (if any) over the query text first
	if ( m_pFieldFilter && sQuery && m_pFieldFilter->Clone()->Apply ( sQuery, dFiltered, true ) )
		sQuery = (const char *)dFiltered.Begin();
	auto tParsed = std::make_unique<XQQuery_t>();
	std::unique_ptr<QueryParser_i> tParser = g_pCreateQueryParser ( !tArgs.m_bQL );
	// right tokenizer created at upper level
	if ( !tParser->ParseQuery ( *tParsed, sQuery, nullptr, pTokenizer, pTokenizer, &m_tSchema, pDict, m_tSettings, &m_tMorphFields ) )
	{
		sError = tParsed->m_sParseError;
		return nullptr;
	}
	// FIXME!!! provide segments list instead index
	sphTransformExtendedQuery ( &tParsed->m_pRoot, m_tSettings, false, nullptr );
	bool bWordDict = m_pDict->GetSettings().m_bWordDict;
	if ( m_tMutableSettings.m_iExpandKeywords!=KWE_DISABLED )
	{
		sphQueryExpandKeywords ( &tParsed->m_pRoot, m_tSettings, m_tMutableSettings.m_iExpandKeywords, bWordDict );
		tParsed->m_pRoot->Check ( true );
	}
	// this should be after keyword expansion
	TransformAotFilter ( tParsed->m_pRoot, pDict->GetWordforms(), m_tSettings );
	// mark wildcard keywords when prefix/infix indexing is enabled
	if ( m_tSettings.GetMinPrefixLen ( bWordDict )>0 || m_tSettings.m_iMinInfixLen>0 )
		tParsed->m_pRoot = FixExpanded ( tParsed->m_pRoot, m_tSettings.GetMinPrefixLen ( bWordDict ), m_tSettings.m_iMinInfixLen, ( pDict->HasMorphology () || m_tSettings.m_bIndexExactWords ) );
	// FIXME!!! move whole m_pRoot/pStored->m_pXQ content into arena and use from there to reduce fragmentation
	{
		XQTreeCompressor_t tXQCompressor;
		tXQCompressor.DoWork( tParsed->m_pRoot );
	}
	auto pStored = std::make_unique<StoredQuery_t>();
	pStored->m_pXQ = std::move ( tParsed );
	pStored->m_bOnlyTerms = true;
	pStored->m_sQuery = sQuery;
	// precompute the fast-reject data (terms, wildcards, suffixes)
	QueryGetRejects ( pStored->m_pXQ->m_pRoot, pDict, pStored->m_dRejectTerms, pStored->m_dRejectWilds, pStored->m_dSuffixes, pStored->m_bOnlyTerms, ( m_iMaxCodepointLength>1 ) );
	QueryGetTerms ( pStored->m_pXQ->m_pRoot, pDict, pStored->m_hDict );
	pStored->m_sTags = tArgs.m_sTags;
	PercolateTags ( tArgs.m_sTags, pStored->m_dTags );
	pStored->m_iQUID = tArgs.m_iQUID;
	pStored->m_dFilters.CopyFrom ( tArgs.m_dFilters );
	pStored->m_dFilterTree.CopyFrom ( tArgs.m_dFilterTree );
	pStored->m_bQL = tArgs.m_bQL;
	// need keep m_bEmpty only in case query string is really empty string
	// but use full-text matching path in case query has only out of charset_table chars
	if ( pStored->m_pXQ->m_bEmpty && sQuery )
		pStored->m_pXQ->m_bEmpty = IsEmpty ( FromSz ( sQuery ) );
	CalcNecessaryStack ( pStored.get(), sError );
	return pStored;
}
// Compute and store the stack size this query will need at match time:
// the maximum of the fulltext-tree demand and the filter-tree demand.
void PercolateIndex_c::CalcNecessaryStack ( StoredQuery_t* pStored, CSphString& sError )
{
	if ( !pStored )
		return;

	// stack needed to evaluate the fulltext tree (if any)
	int iFtStack = -1;
	if ( pStored->m_pXQ && pStored->m_pXQ->m_pRoot )
		iFtStack = ConsiderStackAbsolute ( pStored->m_pXQ->m_pRoot );

	// stack needed to evaluate the attribute filter tree
	int iTreeHeight = 0;
	if ( !pStored->m_dFilterTree.IsEmpty() )
		iTreeHeight = EvalMaxTreeHeight ( pStored->m_dFilterTree, pStored->m_dFilterTree.GetLength()-1 );
	int iFilterStack = iTreeHeight * GetFilterStackItemSize() + GetStartFilterStackItemSize();

	pStored->m_iStackRequired = Max ( iFtStack, iFilterStack );
}
// Deserialize one insert/delete batch in the fixed order: delete-by-tag list,
// delete-by-id list, then the descriptors of newly added queries.
// READER is any reader exposing UnzipInt/UnzipOffset (file or in-memory).
template<typename READER>
static void LoadInsertDeleteQueries_T ( CSphVector<StoredQueryDesc_t>& dNewQueries, CSphVector<int64_t>& dDeleteQueries, CSphVector<uint64_t>& dDeleteTags, READER& tReader )
{
	dDeleteTags.Resize ( tReader.UnzipInt() );
	for ( auto& tTag: dDeleteTags )
		tTag = tReader.UnzipOffset();

	dDeleteQueries.Resize ( tReader.UnzipInt() );
	for ( auto& tQuery: dDeleteQueries )
		tQuery = tReader.UnzipOffset();

	dNewQueries.Resize ( tReader.UnzipInt() );
	for ( auto& tNewQuery: dNewQueries )
		LoadStoredQuery ( PQ_META_VERSION_MAX, tNewQuery, tReader );
}
// CSphReader front-end for LoadInsertDeleteQueries_T (used when replaying binlog)
static void LoadInsertDeleteQueries ( CSphVector<StoredQueryDesc_t>& dNewQueries, CSphVector<int64_t>& dDeleteQueries, CSphVector<uint64_t>& dDeleteTags, CSphReader& tReader )
{
	LoadInsertDeleteQueries_T ( dNewQueries, dDeleteQueries, dDeleteTags, tReader );
}
// Serialize an insert/delete batch in exactly the order that
// LoadInsertDeleteQueries_T() reads it back: tags, delete ids, new queries.
template<typename WRITER, typename QUERY>
static void SaveInsertDeleteQueries_T ( const VecTraits_T<QUERY> & dNewQueries, const VecTraits_T<int64_t> & dDeleteQueries, const VecTraits_T<uint64_t> & dDeleteTags, WRITER & tWriter )
{
	tWriter.ZipInt ( dDeleteTags.GetLength() );
	for ( uint64_t uTag : dDeleteTags )
		tWriter.ZipOffset ( uTag );

	tWriter.ZipInt ( dDeleteQueries.GetLength() );
	for ( int64_t iQuery : dDeleteQueries )
		tWriter.ZipOffset ( iQuery );

	tWriter.ZipInt ( dNewQueries.GetLength() );
	// QUERY must convert to StoredQuery_i* (raw or shared pointer)
	for ( StoredQuery_i* pQuery : dNewQueries )
		SaveStoredQueryImpl ( *pQuery, tWriter );
}
// In-memory front-end: serialize the batch into a byte vector
template<typename QUERY>
static void SaveInsertDeleteQueries ( const VecTraits_T<QUERY> & dNewQueries, const VecTraits_T<int64_t> & dDeleteQueries, const VecTraits_T<uint64_t> & dDeleteTags, CSphVector<BYTE> & dOut )
{
	MemoryWriter_c tWriter ( dOut );
	SaveInsertDeleteQueries_T ( dNewQueries, dDeleteQueries, dDeleteTags, tWriter );
}
// Generic-writer front-end (e.g. the binlog writer)
template<typename QUERY, typename WRITER>
static void SaveInsertDeleteQueries ( const VecTraits_T<QUERY> & dNewQueries, const VecTraits_T<int64_t> & dDeleteQueries, const VecTraits_T<uint64_t> & dDeleteTags, WRITER & tWriter )
{
	SaveInsertDeleteQueries_T ( dNewQueries, dDeleteQueries, dDeleteTags, tWriter );
}
namespace {

// Wrap the raw query pointers into shared pointers (the replay loop may retry
// more than once, so ownership has to be shared) and eliminate duplicates:
// of several queries with the same QUID the last one wins.
CSphVector<StoredQuerySharedPtr_t> UniqAndWrapQueries ( const VecTraits_T<StoredQuery_i*>& dNewQueries )
{
	OpenHashTable_T<int64_t, int> hSeen;	// QUID -> slot in dWrapped
	CSphVector<StoredQuerySharedPtr_t> dWrapped;
	dWrapped.Reserve ( dNewQueries.GetLength() );
	for ( StoredQuery_i* pRaw : dNewQueries )
	{
		StoredQuerySharedPtr_t pShared { (StoredQuery_t*)pRaw };
		int * pSlot = hSeen.Find ( pRaw->m_iQUID );
		if ( pSlot )
			dWrapped[*pSlot] = pShared;	// duplicate QUID: the newest wins
		else
		{
			hSeen.Add ( pRaw->m_iQUID, dWrapped.GetLength() );
			dWrapped.Add ( pShared );
		}
	}
	return dWrapped;
}

} // namespace
// Apply a batch of inserts and deletes to the stored query set using an
// optimistic-concurrency scheme: build a candidate snapshot outside the write
// lock, then commit only if the generation did not change meanwhile; otherwise
// retry the whole loop. Returns the number of deleted queries.
int PercolateIndex_c::ReplayInsertAndDeleteQueries ( const VecTraits_T<StoredQuery_i*>& dNewQueries, const VecTraits_T<int64_t>& dDeleteQueries, const VecTraits_T<uint64_t>& dDeleteTags ) EXCLUDES ( m_tLock )
{
	// wrap original queries vec, since we might retry more than once
	auto dNewSharedQueries = UniqAndWrapQueries ( dNewQueries );
	while ( true )
	{
		SharedPQSlice_t dElems;
		int64_t iLimit = -1;	// <0 means "snapshot not taken yet"
		// will use this slice to actual deletion
		VecTraits_T<int64_t> dAllToDelete = dDeleteQueries;
		// collect deletes by tag
		CSphVector<int64_t> dDeleteIdsAndTags;
		if ( !dDeleteTags.IsEmpty() )
		{
			// for delete by tags we need snapshot of the current queries
			{
				ScRL_t rLock ( m_tLock );
				dElems = GetStoredUnl();
				iLimit = m_pQueries->GetLimit();
			}
			// collect all deletes from tags, to process them all uniform way then
			for ( const StoredQuery_t* pQuery : dElems )
				if ( !pQuery->m_dTags.IsEmpty() && TagsMatched ( dDeleteTags, pQuery->m_dTags ) )
					dDeleteIdsAndTags.Add ( pQuery->m_iQUID );
			if ( !dDeleteIdsAndTags.IsEmpty() )
			{
				// merge tag-derived ids with explicit ids, de-duplicated
				dDeleteIdsAndTags.Append ( dDeleteQueries );
				dDeleteIdsAndTags.Uniq();
				dAllToDelete = dDeleteIdsAndTags;
			}
		}
		// for both deletion and addition we need hash and snapshot
		OpenHashTable_T<int64_t, int> hQueries { 0 };
		{
			ScRL_t rLock ( m_tLock );
			if ( iLimit<0 )
			{
				// snapshot was not taken above - take it now
				dElems = GetStoredUnl();
				iLimit = m_pQueries->GetLimit();
			} else if ( dElems.Generation() != m_iGeneration )
				continue;	// somebody changed the set between our two reads - retry
			hQueries = m_hQueries;
		}
		StoredQuerySharedPtrVecSharedPtr_t pNewVec;
		bool bWithFullClone = false;	// becomes true once we had to deep-copy the vector
		int iDeleted = 0;
		// delete by id pass (deletes by tags are also collected here)
		for ( int64_t iQuery : dAllToDelete )
		{
			auto* pIdx = hQueries.Find ( iQuery );
			if ( !pIdx )
				continue;
			if ( !bWithFullClone ) // first virgin hit, need to make heavy full clone of the queries
			{
				bWithFullClone = true;
				pNewVec = new CSphVector<StoredQuerySharedPtr_t>;
				pNewVec->Reserve ( iLimit );
				for ( auto& iElem : dElems )
					pNewVec->Add ( iElem );
			}
			auto iIdx = *pIdx;
			hQueries.Delete ( iQuery );
			if ( iQuery != pNewVec->Last()->m_iQUID )
				*hQueries.Find ( pNewVec->Last()->m_iQUID ) = iIdx; // fixup to removeFast
			pNewVec->RemoveFast ( iIdx );
			++iDeleted;
		}
		// insert/replace pass
		// check whether we can insert the fastest possible way, or need to modify snapshot and only then insert
		if ( !bWithFullClone )
		{
			for ( const auto& pQuery : dNewSharedQueries )
			{
				// a replace of an existing QUID forces the full clone too
				if ( hQueries.Find ( pQuery->m_iQUID ) && !bWithFullClone )
				{
					bWithFullClone = true;
					pNewVec = new CSphVector<StoredQuerySharedPtr_t>;
					pNewVec->Reserve ( iLimit );
					for ( auto& iElem : dElems )
						pNewVec->Add ( iElem );
					break;
				}
			}
		}
		// perform inserts into clone
		int64_t iNewInserted = 0;
		if ( bWithFullClone )
		{
			for ( auto& pQuery : dNewSharedQueries )
			{
				int* pIdx = hQueries.Find ( pQuery->m_iQUID );
				if ( !pIdx )
				{
					hQueries.Add ( pQuery->m_iQUID, pNewVec->GetLength() );
					pNewVec->Add ( pQuery );
					++iNewInserted;
				} else
					( *pNewVec )[*pIdx] = pQuery;	// replace in place, count stays
			}
		}
		// commit: only valid if nobody changed the set since our snapshot
		ScWL_t wLock ( m_tLock );
		if ( dElems.Generation() != m_iGeneration )
			continue;	// lost the race - redo everything
		if ( bWithFullClone )
		{
			m_pQueries = pNewVec;
			m_hQueries = std::move(hQueries);
			++m_iGeneration;
		} else {
			// fast path: pure appends of brand-new QUIDs
			for ( auto& pQuery : dNewSharedQueries )
			{
				assert ( !hQueries.Find ( pQuery->m_iQUID ) );
				AddToStoredUnl ( pQuery );
				++iNewInserted;
			}
		}
		m_tStat.m_iTotalDocuments += iNewInserted - iDeleted;
		CSphString sError;
		// write the applied batch into the binlog
		Binlog::Commit ( &m_iTID, GetName(), sError, [&dNewSharedQueries, dDeleteQueries, dDeleteTags] ( Writer_i & tWriter ) {
			// my user op
			tWriter.PutByte ( Binlog::PQ_ADD_DELETE );
			SaveInsertDeleteQueries ( dNewSharedQueries, dDeleteQueries, dDeleteTags, tWriter );
		} );
		return iDeleted;
	}
}
// Commit the accumulated add/delete commands: split them into new queries,
// delete-by-id and delete-by-tag lists, then replay the batch atomically.
// pDeleted (optional) receives the number of deleted queries.
bool PercolateIndex_c::Commit ( int * pDeleted, RtAccum_t * pAcc, CSphString* )
{
	assert ( g_bRTChangesAllowed );
	if ( !BindAccum ( pAcc ) )
		return true;
	CSphVector<StoredQuery_i*> dNewQueries; // not owned
	CSphVector<int64_t> dDeleteQueries;
	CSphVector<uint64_t> dDeleteTags;
	for ( auto& pCmd : pAcc->m_dCmd )
	{
		switch ( pCmd->m_eCommand )
		{
		case ReplCmd_e::PQUERY_ADD:
			dNewQueries.Add ( pCmd->m_pStored.release() );
			break;
		case ReplCmd_e::PQUERY_DELETE:
			// a delete command carries either explicit ids or a tag string
			if ( pCmd->m_dDeleteQueries.GetLength() )
				dDeleteQueries.Append ( pCmd->m_dDeleteQueries );
			else
				PercolateAppendTags ( pCmd->m_sDeleteTags, dDeleteTags );
			break;
		default:
			sphWarning ( "table %s: unsupported command %d", GetName(), (int)pCmd->m_eCommand );
		}
	}
	dDeleteTags.Uniq();
	dDeleteQueries.Uniq();
	int iDeleted = ReplayInsertAndDeleteQueries ( dNewQueries, dDeleteQueries, dDeleteTags );
	pAcc->Cleanup();
	if ( pDeleted )
		*pDeleted = iDeleted;
	return true;
}
// Replay one PQ_ADD_DELETE binlog transaction: deserialize the batch,
// re-compile the stored queries, and re-apply the inserts/deletes.
Binlog::CheckTnxResult_t PercolateIndex_c::ReplayTxn ( CSphReader& tReader, CSphString & sError, BYTE uOp, Binlog::CheckTxn_fn&& fnCanContinue )
{
	assert ( uOp == Binlog::PQ_ADD_DELETE );
	CSphVector<StoredQueryDesc_t> dNewQueriesDescs;
	CSphVector<int64_t> dDeleteQueries;
	CSphVector<uint64_t> dDeleteTags;
	LoadInsertDeleteQueries ( dNewQueriesDescs, dDeleteQueries, dDeleteTags, tReader );
	Binlog::CheckTnxResult_t tRes = fnCanContinue ();
	if ( tRes.m_bValid && tRes.m_bApply )
	{
		CSphVector<StoredQuery_i *> dNewQueries; // not owned
		// fixed: reserve by the number of loaded descriptors
		// (was dNewQueries.GetLength(), i.e. always 0 - a no-op)
		dNewQueries.Reserve ( dNewQueriesDescs.GetLength () );
		for ( StoredQueryDesc_t & tDesc: dNewQueriesDescs )
		{
			PercolateQueryArgs_t tArgs ( tDesc );
			// at binlog query already passed replace checks
			tArgs.m_bReplace = true;
			// actually re-compile the stored query
			auto pQuery = CreateQuery ( tArgs, sError );
			if ( !pQuery )
			{
				// copy first: formatting sError from its own cstr() is unsafe
				CSphString sQueryError = sError;
				sError.SetSprintf ( "apply error, %s", sQueryError.cstr () );
				tRes = Binlog::CheckTnxResult_t ();
				// free everything compiled so far before bailing out
				for ( StoredQuery_i * pDelQuery: dNewQueries )
					SafeDelete ( pDelQuery );
				return tRes;
			}
			dNewQueries.Add ( pQuery.release () );
		}
		// actually replay
		ReplayInsertAndDeleteQueries ( dNewQueries, dDeleteQueries, dDeleteTags );
		tRes.m_bApply = true;
	}
	return tRes;
}
// Final-stage match processor for percolate fullscan: runs the deferred
// ("final") expression calculations on each match and stamps it with the tag.
class PqMatchProcessor_c : public MatchProcessor_i, ISphNoncopyable
{
public:
	PqMatchProcessor_c ( int iTag, const CSphQueryContext & tCtx )
		: m_iTag ( iTag )
		, m_tCtx ( tCtx )
	{}

	bool ProcessInRowIdOrder() const final { return false; }
	void Process ( CSphMatch * pMatch ) final { ProcessMatch(pMatch); }
	void Process ( VecTraits_T<CSphMatch *> & dMatches ) final { dMatches.for_each ( [this]( CSphMatch * pMatch ){ ProcessMatch(pMatch); } ); }

private:
	int m_iTag;
	const CSphQueryContext & m_tCtx;

	inline void ProcessMatch ( CSphMatch * pMatch )
	{
		// fixme! tag is signed int,
		// for distr. tags from remotes set with | 0x80000000,
		// i.e. in terms of signed int they're <0!
		// Is it intention, or bug?
		// If intention, let us use uniformly either <0, or &0x80000000
		// conditions to avoid messing. If bug, shit already happened!
		if ( pMatch->m_iTag>=0 )
			return;	// already processed (non-negative tag = final calc done)
		m_tCtx.CalcFinal ( *pMatch );
		pMatch->m_iTag = m_iTag;
	}
};
// Fullscan over the stored queries themselves (SELECT over the PQ table):
// each stored query becomes a match row with packed id/query/tags/filters
// attributes, which is then filtered, weighted, and pushed into the sorters.
bool PercolateIndex_c::MultiScan ( CSphQueryResult & tResult, const CSphQuery & tQuery, const VecTraits_T<ISphMatchSorter *> & dSorters, const CSphMultiQueryArgs & tArgs ) const
{
	assert ( tArgs.m_iTag>=0 );
	auto & tMeta = *tResult.m_pMeta;
	QueryProfile_c * pProfiler = tMeta.m_pProfile;

	// we count documents only (before filters)
	if ( tQuery.m_iMaxPredictedMsec )
		tMeta.m_bHasPrediction = true;

	if ( tArgs.m_uPackedFactorFlags & SPH_FACTOR_ENABLE )
		tMeta.m_sWarning.SetSprintf ( "packedfactors() will not work with a fullscan; you need to specify a query" );

	// start counting
	int64_t tmQueryStart = sphMicroTimer ();
	MiniTimer_c dTimerGuard;
	int64_t tmMaxTimer = dTimerGuard.Engage ( tQuery.m_uMaxQueryMsec ); // max_query_time

	// select the sorter with max schema
	// uses GetAttrsCount to get working facets (was GetRowSize)
	int iMaxSchemaIndex = GetMaxSchemaIndexAndMatchCapacity ( dSorters ).first;
	const ISphSchema & tMaxSorterSchema = *( dSorters[iMaxSchemaIndex]->GetSchema ());
	auto dSorterSchemas = SorterSchemas ( dSorters, iMaxSchemaIndex );

	// setup calculations and result schema
	CSphQueryContext tCtx ( tQuery );
	if ( !tCtx.SetupCalc ( tMeta, tMaxSorterSchema, m_tMatchSchema, nullptr, nullptr, dSorterSchemas ) )
		return false;

	// setup filters
	CreateFilterContext_t tFlx;
	tFlx.m_pFilters = &tQuery.m_dFilters;
	tFlx.m_pFilterTree = &tQuery.m_dFilterTree;
	tFlx.m_pMatchSchema = &tMaxSorterSchema;
	tFlx.m_pIndexSchema = &m_tSchema;
	tFlx.m_eCollation = tQuery.m_eCollation;
	tFlx.m_bScan = true;
	if ( !tCtx.CreateFilters ( tFlx, tMeta.m_sError, tMeta.m_sWarning ) )
		return false;

	// get all locators; the pseudo-columns follow the docid column in order
	auto iIDidx = m_tMatchSchema.GetAttrIndex ( sphGetDocidName () );
	const CSphColumnInfo & dID = m_tMatchSchema.GetAttr ( iIDidx );
	const CSphColumnInfo & dColQuery = m_tMatchSchema.GetAttr ( iIDidx+1 );
	const CSphColumnInfo & dColTags = m_tMatchSchema.GetAttr ( iIDidx+2 );
	const CSphColumnInfo & dColFilters = m_tMatchSchema.GetAttr ( iIDidx+3 );
#if PARANOID
	assert ( m_tMatchSchema.GetAttrIndex ( "query" )==iIDidx + 1 );
	assert ( m_tMatchSchema.GetAttrIndex ( "tags" )==iIDidx + 2 );
	assert ( m_tMatchSchema.GetAttrIndex ( "filters" )==iIDidx + 3 );
#endif
	StringBuilder_c sFilters;

	// prepare to work them rows
	bool bRandomize = dSorters[0]->IsRandom();
	CSphMatch tMatch;
	// note: we reserve dynamic area in match using max sorter schema, but then fill it by locators from index schema.
	// that works relying that sorter always includes all attrs from index, leaving final selection of cols
	// to result minimizer. Once we try to pre-optimize sorter schema by select list, it will cause crashes here.
	tMatch.Reset ( tMaxSorterSchema.GetDynamicSize () );
	tMatch.m_iWeight = tArgs.m_iIndexWeight;
	// fixme! tag also used over bitmask | 0x80000000,
	// which marks that match comes from remote.
	// using -1 might be also interpreted as 0xFFFFFFFF in such context!
	// Does it intended?
	tMatch.m_iTag = tCtx.m_dCalcFinal.GetLength () ? -1 : tArgs.m_iTag;

	CSphScopedProfile tProf ( pProfiler, SPH_QSTATE_FULLSCAN );

	int iCutoff = ( tQuery.m_iCutoff<=0 ) ? -1 : tQuery.m_iCutoff;
	BYTE * pData = nullptr;
	CSphVector<PercolateQueryDesc> dQueries;
	auto dStored = GetStored();
	const int64_t& iCheckTimePoint { Threads::Coro::GetNextTimePointUS() };
	Threads::Coro::HighFreqChecker_c fnHeavyCheck;

	for ( const StoredQuery_t * pQuery : dStored )
	{
		// pack id, query text, tags and formatted filters into the match row
		tMatch.SetAttr ( dID.m_tLocator, pQuery->m_iQUID );
		int iLen = pQuery->m_sQuery.Length ();
		tMatch.SetAttr ( dColQuery.m_tLocator, (SphAttr_t) sphPackPtrAttr ( iLen, &pData ) );
		memcpy ( pData, pQuery->m_sQuery.cstr (), iLen );
		if ( pQuery->m_sTags.IsEmpty () )
			tMatch.SetAttr ( dColTags.m_tLocator, ( SphAttr_t ) 0 );
		else {
			iLen = pQuery->m_sTags.Length();
			tMatch.SetAttr ( dColTags.m_tLocator, ( SphAttr_t ) sphPackPtrAttr ( iLen, &pData ) );
			memcpy ( pData, pQuery->m_sTags.cstr (), iLen );
		}
		sFilters.Clear ();
		if ( pQuery->m_dFilters.GetLength () )
			FormatFiltersQL ( pQuery->m_dFilters, pQuery->m_dFilterTree, sFilters );
		iLen = sFilters.GetLength ();
		tMatch.SetAttr ( dColFilters.m_tLocator, ( SphAttr_t ) sphPackPtrAttr ( iLen, &pData ) );
		memcpy ( pData, sFilters.cstr (), iLen );

		++tMeta.m_tStats.m_iFetchedDocs;

		// apply filters; free packed data on rejection
		tCtx.CalcFilter ( tMatch );
		if ( tCtx.m_pFilter && !tCtx.m_pFilter->Eval ( tMatch ) )
		{
			tCtx.FreeDataFilter ( tMatch );
			m_tMatchSchema.FreeDataPtrs ( tMatch );
			continue;
		}

		if ( bRandomize )
			tMatch.m_iWeight = ( sphRand () & 0xffff ) * tArgs.m_iIndexWeight;

		// submit match to sorters
		tCtx.CalcSort ( tMatch );
		bool bNewMatch = false;
		dSorters.Apply ( [&tMatch, &bNewMatch] ( ISphMatchSorter * p ) { bNewMatch |= p->Push ( tMatch ); } );

		// stringptr expressions should be duplicated (or taken over) at this point
		tCtx.FreeDataFilter ( tMatch );
		tCtx.FreeDataSort ( tMatch );
		m_tMatchSchema.FreeDataPtrs ( tMatch );

		// handle cutoff
		if ( bNewMatch && --iCutoff==0 )
			break;

		// handle timer
		if ( sph::TimeExceeded ( tmMaxTimer ) )
		{
			tMeta.m_sWarning = "query time exceeded max_query_time";
			break;
		}

		// periodically check for kill and yield to other coroutines
		if ( fnHeavyCheck() && sph::TimeExceeded ( iCheckTimePoint ) )
		{
			if ( session::GetKilled() )
			{
				tMeta.m_sWarning = "query was killed";
				break;
			}
			Threads::Coro::RescheduleAndKeepCrashQuery();
		}
	}

	SwitchProfile ( pProfiler, SPH_QSTATE_FINALIZE );

	// do final expression calculations
	if ( tCtx.m_dCalcFinal.GetLength () )
	{
		PqMatchProcessor_c tFinal ( tArgs.m_iTag, tCtx );
		dSorters.Apply ( [&] ( ISphMatchSorter * p ) { p->Finalize ( tFinal, false, tArgs.m_bFinalizeSorters ); } );
	}

	tMeta.m_iQueryTime += ( int ) ( ( sphMicroTimer () - tmQueryStart ) / 1000 );
	return true;
}
// Query dispatcher for the PQ table. Only fullscan selects over the stored
// queries are supported; anything else returns false.
bool PercolateIndex_c::MultiQuery ( CSphQueryResult & tResult, const CSphQuery & tQuery,
	const VecTraits_T<ISphMatchSorter *> & dAllSorters, const CSphMultiQueryArgs &tArgs ) const
{
	MEMORY ( MEM_DISK_QUERY );

	// drop the null entries up front so later code never has to re-check them
	CSphVector<ISphMatchSorter *> dValidSorters;
	dValidSorters.Reserve ( dAllSorters.GetLength() );
	for ( ISphMatchSorter * pSorter : dAllSorters )
		if ( pSorter )
			dValidSorters.Add ( pSorter );

	// nothing to feed results into
	if ( dValidSorters.IsEmpty() )
		return false;

	// non-random at the start, random at the end
	dValidSorters.Sort ( CmpPSortersByRandom_fn () );

	const QueryParser_i * pQueryParser = tQuery.m_pQueryParser;
	assert ( pQueryParser );

	// fast path for scans; anything else is unsupported here
	if ( !pQueryParser->IsFullscan ( tQuery ) )
		return false;
	return MultiScan ( tResult, tQuery, dValidSorters, tArgs );
}
// Finish index setup after settings are loaded (callers hold the write lock):
// configure tokenizers (bigrams, AOT, SPZ/zones), load hitless words, then
// compile every loaded query descriptor back into a stored query.
void PercolateIndex_c::PostSetupUnl()
{
	PercolateIndex_i::PostSetup();
	m_iMaxCodepointLength = m_pTokenizer->GetMaxCodepointLength();

	// bigram filter
	if ( m_tSettings.m_eBigramIndex!=SPH_BIGRAM_NONE && m_tSettings.m_eBigramIndex!=SPH_BIGRAM_ALL )
	{
		// tokenize the bigram word list from settings into a sorted vector
		m_pTokenizer->SetBuffer ( (BYTE*)const_cast<char*> ( m_tSettings.m_sBigramWords.cstr() ), m_tSettings.m_sBigramWords.Length() );
		for ( auto * pTok = m_pTokenizer->GetToken (); pTok; pTok = m_pTokenizer->GetToken () )
			m_tSettings.m_dBigramWords.Add() = (const char*)pTok;
		m_tSettings.m_dBigramWords.Sort();
	}

	// FIXME!!! handle error
	m_pTokenizerIndexing = m_pTokenizer->Clone ( SPH_CLONE_INDEX );
	Tokenizer::AddBigramFilterTo ( m_pTokenizerIndexing, m_tSettings.m_eBigramIndex, m_tSettings.m_sBigramWords, m_sLastError );
	if ( m_tSettings.m_uAotFilterMask )
		sphAotTransformFilter ( m_pTokenizerIndexing, m_pDict, m_tSettings.m_bIndexExactWords, m_tSettings.m_uAotFilterMask );

	// SPZ and zones setup
	if ( ( m_tSettings.m_bIndexSP && !m_pTokenizerIndexing->EnableSentenceIndexing ( m_sLastError ) ) ||
		( !m_tSettings.m_sZones.IsEmpty () && !m_pTokenizerIndexing->EnableZoneIndexing ( m_sLastError )) )
		m_pTokenizerIndexing = nullptr;

	bool bWordDict = m_pDict->GetSettings().m_bWordDict;

	// create queries: set up the QL and JSON query tokenizers plus the dict
	TokenizerRefPtr_c pTokenizer = sphCloneAndSetupQueryTokenizer ( m_pTokenizer, IsStarDict ( bWordDict ), m_tSettings.m_bIndexExactWords, false );
	TokenizerRefPtr_c pTokenizerJson = sphCloneAndSetupQueryTokenizer ( m_pTokenizer, IsStarDict ( bWordDict ), m_tSettings.m_bIndexExactWords, true );

	DictRefPtr_c pDict = GetStatelessDict ( m_pDict );

	if ( IsStarDict ( bWordDict ) )
		SetupStarDictV8 ( pDict );

	if ( m_tSettings.m_bIndexExactWords )
		SetupExactDict ( pDict );

	// resolve the hitless words file path (may be rewritten by the filename builder)
	CSphString sHitlessFiles = m_tSettings.m_sHitlessFiles;
	if ( GetIndexFilenameBuilder() )
	{
		std::unique_ptr<FilenameBuilder_i> pFilenameBuilder = GetIndexFilenameBuilder() ( GetName() );
		if ( pFilenameBuilder )
			sHitlessFiles = pFilenameBuilder->GetFullPath ( sHitlessFiles );
	}

	// hitless
	if ( !LoadHitlessWords ( sHitlessFiles, m_pTokenizerIndexing, m_pDict, m_dHitlessWords, m_sLastError ) )
		sphWarning ( "table '%s': %s", GetName(), m_sLastError.cstr() );

	// re-compile every loaded query descriptor into a live stored query
	m_pQueries->ReserveGap( m_dLoadedQueries.GetLength () );
	CSphString sError;
	for ( const StoredQueryDesc_t& tQuery : m_dLoadedQueries )
	{
		const TokenizerRefPtr_c& pTok = tQuery.m_bQL ? pTokenizer : pTokenizerJson;
		PercolateQueryArgs_t tArgs ( tQuery );
		if ( CanBeAdded ( tArgs, sError ) )
		{
			auto pQuery = CreateQuery ( tArgs, pTok, pDict, sError );
			if ( pQuery )
			{
				// as a new (not replace), query it will be anyway added to the tail.
				// so, we may reserve ref in the hash, and then occupy it with the query.
				assert ( !tArgs.m_bReplace );
				AddToStoredUnl ( StoredQuerySharedPtr_t ((StoredQuery_t *) pQuery.release() ));
				continue;
			}
		}
		sphWarning ( "table '%s': %d (id=" INT64_FMT ") query failed to load, ignoring", GetName(), m_dLoadedQueries.Idx ( &tQuery ), tQuery.m_iQUID );
	}

	m_dLoadedQueries.Reset ( 0 );

	// still need index files for index just created from config
	if ( !m_bHasFiles )
		SaveMeta ( SharedPQSlice_t ( m_pQueries ) );
}
void PercolateIndex_c::PostSetup () EXCLUDES ( m_tLock )
{
	// thread-safe facade: take the writer lock, then run the unlocked worker
	ScWL_t wLock ( m_tLock );
	PostSetupUnl();
}
// load old-style (legacy) binary meta
PercolateIndex_c::LOAD_E PercolateIndex_c::LoadMetaLegacy ( const CSphString& sMeta, bool bStripPath, FilenameBuilder_i* pFilenameBuilder, StrVec_t& dWarnings )
{
	// Reads the pre-json binary .meta file. Field order below matches the legacy
	// writer exactly and must not be changed. Returns ParseError_e only when the
	// magic doesn't match (so the caller may try another format), GeneralError_e
	// for any other failure.
	/////////////
	// load meta
	/////////////
	m_sLastError = "";
	// opened and locked, lets read
	CSphAutoreader rdMeta;
	if ( !rdMeta.Open ( sMeta, m_sLastError ) )
		return LOAD_E::GeneralError_e;
	// wrong magic: not a binary meta at all
	if ( rdMeta.GetDword() != META_HEADER_MAGIC )
	{
		m_sLastError.SetSprintf ( "invalid meta file %s", sMeta.cstr() );
		return LOAD_E::ParseError_e;
	}
	DWORD uVersion = rdMeta.GetDword();
	if ( uVersion == 0 || uVersion > META_VERSION )
	{
		m_sLastError.SetSprintf ( "%s is v.%u, binary is v.%u", sMeta.cstr(), uVersion, META_VERSION );
		return LOAD_E::GeneralError_e;
	}
	// we don't support anything prior to v8
	DWORD uMinFormatVer = 8;
	if ( uVersion < uMinFormatVer )
	{
		m_sLastError.SetSprintf ( "tables prior to v.%u are no longer supported (use index_converter tool); %s is v.%u", uMinFormatVer, GetFilebase(), uVersion );
		return LOAD_E::GeneralError_e;
	}
	DWORD uIndexVersion = rdMeta.GetDword();
	CSphTokenizerSettings tTokenizerSettings;
	CSphDictSettings tDictSettings;
	CSphEmbeddedFiles tEmbeddedFiles;
	// load settings
	ReadSchema ( rdMeta, m_tSchema, uIndexVersion );
	LoadIndexSettings ( m_tSettings, rdMeta, uIndexVersion );
	if ( !tTokenizerSettings.Load ( pFilenameBuilder, rdMeta, tEmbeddedFiles, m_sLastError ) )
		return LOAD_E::GeneralError_e;
	tDictSettings.Load ( rdMeta, tEmbeddedFiles, pFilenameBuilder, m_sLastWarning );
	// initialize AOT if needed
	DWORD uPrevAot = m_tSettings.m_uAotFilterMask;
	m_tSettings.m_uAotFilterMask = sphParseMorphAot ( tDictSettings.m_sMorphology.cstr() );
	// the AOT mask is derived from the stored morphology; a differing config value is ignored on purpose
	if ( m_tSettings.m_uAotFilterMask!=uPrevAot )
		sphWarning ( "table '%s': morphology option changed from config has no effect, ignoring", GetName() );
	if ( bStripPath )
	{
		StripPath ( tTokenizerSettings.m_sSynonymsFile );
		ARRAY_FOREACH ( i, tDictSettings.m_dWordforms )
			StripPath ( tDictSettings.m_dWordforms[i] );
	}
	// recreate tokenizer
	m_pTokenizer = Tokenizer::Create ( tTokenizerSettings, &tEmbeddedFiles, pFilenameBuilder, dWarnings, m_sLastError );
	if ( !m_pTokenizer )
		return LOAD_E::GeneralError_e;
	// recreate dictionary
	m_pDict = sphCreateDictionaryCRC ( tDictSettings, &tEmbeddedFiles, m_pTokenizer, GetName(), bStripPath, m_tSettings.m_iSkiplistBlockSize, pFilenameBuilder, m_sLastError );
	if ( !m_pDict )
	{
		m_sLastError.SetSprintf ( "table '%s': %s", GetName(), m_sLastError.cstr() );
		return LOAD_E::GeneralError_e;
	}
	Tokenizer::AddToMultiformFilterTo ( m_pTokenizer, m_pDict->GetMultiWordforms () );
	// regexp and ICU (field filter section appeared in meta v.6)
	if ( uVersion>=6 )
	{
		std::unique_ptr<ISphFieldFilter> pFieldFilter;
		CSphFieldFilterSettings tFieldFilterSettings;
		tFieldFilterSettings.Load(rdMeta);
		if ( tFieldFilterSettings.m_dRegexps.GetLength() )
			pFieldFilter = sphCreateRegexpFilter ( tFieldFilterSettings, m_sLastError );
		if ( !sphSpawnFilterICU ( pFieldFilter, m_tSettings, tTokenizerSettings, sMeta.cstr(), m_sLastError ) )
			return LOAD_E::GeneralError_e;
		SetFieldFilter ( std::move ( pFieldFilter ) );
	}
	// queries (deserialized later in PostSetup; here we only stage the descriptors)
	DWORD uQueries = rdMeta.GetDword();
	m_dLoadedQueries.Reset ( uQueries );
	if ( uVersion<7)
	{
		for ( auto& tQuery : m_dLoadedQueries )
			LoadStoredQueryV6 ( uVersion, tQuery, rdMeta);
	} else
	{
		for ( auto & tQuery : m_dLoadedQueries )
			LoadStoredQuery ( uVersion, tQuery, rdMeta );
		// TID trailer exists only since v7
		m_iTID = rdMeta.GetOffset ();
	}
	m_tStat.m_iTotalDocuments = uQueries;
	m_iSavedTID = m_iTID;
	return LOAD_E::Ok_e;
}
void LoadStoredQueryJson ( StoredQueryDesc_t& tQuery, const bson::Bson_c& tNode );
// load new (json) meta
PercolateIndex_c::LOAD_E PercolateIndex_c::LoadMetaJson ( const CSphString& sMeta, bool bStripPath, FilenameBuilder_i* pFilenameBuilder, StrVec_t& dWarnings )
{
	// Reads the modern json-format .meta. Returns ParseError_e when the file is
	// not valid json / not an object (caller then retries the legacy binary path),
	// GeneralError_e for any other failure.
	using namespace bson;
	CSphVector<BYTE> dData;
	if ( !sphJsonParse ( dData, sMeta, m_sLastError ) )
		return LOAD_E::ParseError_e;
	Bson_c tBson ( dData );
	if ( tBson.IsEmpty() || !tBson.IsAssoc() )
	{
		m_sLastError = "Something wrong read from json meta - it is either empty, either not root object.";
		return LOAD_E::ParseError_e;
	}
	// version
	DWORD uVersion = (DWORD)Int ( tBson.ChildByName ( "meta_version" ), 9 );
	if ( uVersion == 10 ) uVersion = 9; // fixme! a little hack, m.b. deal another way? v10 is minor of v9
	if ( uVersion == 0 || uVersion > META_VERSION )
	{
		m_sLastError.SetSprintf ( "%s is v.%u, binary is v.%u", sMeta.cstr(), uVersion, META_VERSION );
		return LOAD_E::GeneralError_e;
	}
	// we don't support anything prior to v9 (json meta appeared in v9)
	DWORD uMinFormatVer = 9;
	if ( uVersion < uMinFormatVer )
	{
		m_sLastError.SetSprintf ( "tables prior to v.%u are no longer supported (use index_converter tool); %s is v.%u", uMinFormatVer, GetFilebase(), uVersion );
		return LOAD_E::GeneralError_e;
	}
	// DWORD uIndexVersion = (DWORD)Int ( tBson.ChildByName ( "index_format_version" ) );
	CSphTokenizerSettings tTokenizerSettings;
	CSphDictSettings tDictSettings;
	CSphEmbeddedFiles tEmbeddedFiles;
	// load settings
	ReadSchemaJson ( tBson.ChildByName ( "schema" ), m_tSchema );
	LoadIndexSettingsJson ( tBson.ChildByName ( "index_settings" ), m_tSettings );
	if ( !tTokenizerSettings.Load ( pFilenameBuilder, tBson.ChildByName ( "tokenizer_settings"), tEmbeddedFiles, m_sLastError ) )
		return LOAD_E::GeneralError_e;
	tDictSettings.Load ( tBson.ChildByName ( "dictionary_settings" ), tEmbeddedFiles, pFilenameBuilder, m_sLastWarning );
	// initialize AOT if needed
	DWORD uPrevAot = m_tSettings.m_uAotFilterMask;
	m_tSettings.m_uAotFilterMask = sphParseMorphAot ( tDictSettings.m_sMorphology.cstr() );
	// the AOT mask is derived from the stored morphology; a differing config value is ignored on purpose
	if ( m_tSettings.m_uAotFilterMask != uPrevAot )
		sphWarning ( "table '%s': morphology option changed from config has no effect, ignoring", GetName() );
	if ( bStripPath )
	{
		StripPath ( tTokenizerSettings.m_sSynonymsFile );
		ARRAY_FOREACH ( i, tDictSettings.m_dWordforms )
			StripPath ( tDictSettings.m_dWordforms[i] );
	}
	// recreate tokenizer
	m_pTokenizer = Tokenizer::Create ( tTokenizerSettings, &tEmbeddedFiles, pFilenameBuilder, dWarnings, m_sLastError );
	if ( !m_pTokenizer )
		return LOAD_E::GeneralError_e;
	// recreate dictionary
	m_pDict = sphCreateDictionaryCRC ( tDictSettings, &tEmbeddedFiles, m_pTokenizer, GetName(), bStripPath, m_tSettings.m_iSkiplistBlockSize, pFilenameBuilder, m_sLastError );
	if ( !m_pDict )
	{
		m_sLastError.SetSprintf ( "table '%s': %s", GetName(), m_sLastError.cstr() );
		return LOAD_E::GeneralError_e;
	}
	Tokenizer::AddToMultiformFilterTo ( m_pTokenizer, m_pDict->GetMultiWordforms() );
	// regexp and ICU
	std::unique_ptr<ISphFieldFilter> pFieldFilter;
	auto tFieldFilterSettingsNode = tBson.ChildByName ( "field_filter_settings" );
	if ( !IsNullNode ( tFieldFilterSettingsNode ) )
	{
		CSphFieldFilterSettings tFieldFilterSettings;
		Bson_c ( tFieldFilterSettingsNode ).ForEach ( [&tFieldFilterSettings] ( const NodeHandle_t& tNode ) {
			tFieldFilterSettings.m_dRegexps.Add ( String ( tNode ) );
		} );
		if ( !tFieldFilterSettings.m_dRegexps.IsEmpty() )
			pFieldFilter = sphCreateRegexpFilter ( tFieldFilterSettings, m_sLastError );
	}
	if ( !sphSpawnFilterICU ( pFieldFilter, m_tSettings, tTokenizerSettings, sMeta.cstr(), m_sLastError ) )
		return LOAD_E::GeneralError_e;
	SetFieldFilter ( std::move ( pFieldFilter ) );
	m_iTID = Int ( tBson.ChildByName ( "tid" ) );
	// "index_id" is optional in older json metas
	auto tIndexId = tBson.ChildByName ( "index_id" );
	if ( !IsNullNode ( tIndexId ) )
		m_iIndexId = Int ( tIndexId );
	// queries (staged into m_dLoadedQueries; compiled later in PostSetup)
	auto tQueriesNode = tBson.ChildByName ( "pqs" );
	if ( !IsNullNode( tQueriesNode) )
	{
		Bson_c tQueriesVec { tQueriesNode };
		m_dLoadedQueries.Reset ( tQueriesVec.CountValues() );
		int iLastQ = 0;
		tQueriesVec.ForEach ( [&iLastQ,this] ( const NodeHandle_t& tNode ) {
			LoadStoredQueryJson ( m_dLoadedQueries[iLastQ++], tNode );
		} );
	}
	m_tStat.m_iTotalDocuments = m_dLoadedQueries.GetLength();
	m_iSavedTID = m_iTID;
	return LOAD_E::Ok_e;
}
bool PercolateIndex_c::LoadMetaImpl ( const CSphString& sMeta, bool bStripPath, FilenameBuilder_i* pFilenameBuilder, StrVec_t& dWarnings )
{
	// try the modern json meta first; if it doesn't even parse as json,
	// fall back to the legacy binary format
	auto eLoaded = LoadMetaJson ( sMeta, bStripPath, pFilenameBuilder, dWarnings );
	if ( eLoaded==LOAD_E::ParseError_e )
	{
		sphInfo ( "Index meta format is not json, will try it as binary..." );
		eLoaded = LoadMetaLegacy ( sMeta, bStripPath, pFilenameBuilder, dWarnings );
	}

	switch ( eLoaded )
	{
	case LOAD_E::Ok_e:
		return true;

	case LOAD_E::ParseError_e:
		// neither format recognized the file
		sphWarning ( "Unable to parse header... Error %s", m_sLastError.cstr() );
		return false;

	case LOAD_E::GeneralError_e:
	default:
		// recognized, but failed somewhere past the header
		sphWarning ( "Unable to load header... Error %s", m_sLastError.cstr() );
		return false;
	}
}
bool PercolateIndex_c::LoadMeta ( const CSphString& sMeta, bool bStripPath, FilenameBuilder_i* pFilenameBuilder, StrVec_t& dWarnings )
{
if ( LoadMetaImpl ( sMeta, bStripPath, pFilenameBuilder, dWarnings ) )
return true;
const char* szDumpPath = getenv ( "dump_corrupt_meta" );
if ( !szDumpPath )
return false;
CSphString sDestPath = SphSprintf("%s%s",szDumpPath,"index.meta");
CSphString sError;
if ( !CopyFile ( sMeta, sDestPath, sError ) )
sphWarning ( "%s", sError.cstr() );
return false;
}
bool PercolateIndex_c::Prealloc ( bool bStripPath, FilenameBuilder_i * pFilenameBuilder, StrVec_t & dWarnings )
{
	// Acquires the exclusive .lock file, then (if a meta file exists) loads the
	// stored queries and mutable settings. Returns false with m_sLastError set on failure.
	CSphString sLock = GetFilename ( "lock" ); // notice: .lock vs .spl
	m_iLockFD = ::open ( sLock.cstr(), SPH_O_NEW, 0644 );
	if ( m_iLockFD < 0 )
	{
		m_sLastError.SetSprintf ( "failed to open %s: %s", sLock.cstr(), strerrorm( errno ) );
		return false;
	}
	if ( !sphLockEx ( m_iLockFD, false ) )
	{
		m_sLastError.SetSprintf ( "failed to lock %s: %s", sLock.cstr(), strerrorm( errno ) );
		::close ( m_iLockFD );
		// reset the handle: otherwise it keeps a stale (closed, possibly reused) fd,
		// and later code that checks m_iLockFD<0 (e.g. SaveMeta) would wrongly proceed
		m_iLockFD = -1;
		return false;
	}
	CSphString sMeta = GetFilename ( "meta" );
	// no readable meta? no disk part yet
	if ( !sphIsReadable ( sMeta.cstr() ) )
		return true;
	m_bHasFiles = true;
	if ( !LoadMeta ( sMeta, bStripPath, pFilenameBuilder, dWarnings ) )
		return false;
	// per-index mutable settings live in a separate file next to the meta
	CSphString sMutableFile = GetFilename ( SPH_EXT_SETTINGS );
	if ( !m_tMutableSettings.Load ( sMutableFile.cstr(), GetName() ) )
		return false;
	m_tmSaved = sphMicroTimer();
	return true;
}
void operator<< ( JsonEscapedBuilder& tOut, const StoredQueryDesc_t& tQuery );
void PercolateIndex_c::SaveMeta ( const SharedPQSlice_t& dStored, bool bShutdown )
{
	// Serializes the given snapshot of stored queries into <index>.meta (json),
	// writing via a .meta.new temp file + rename for atomicity, then notifies the binlog.
	// sanity check
	if ( m_iLockFD<0 || IsSaveDisabled() )
		return;
	// write new meta
	CSphString sNameMeta = GetFilename("meta");
	CSphString sNameMetaNew = GetFilename ( "meta.new" );
	const int iBuffSize = 262144;
	CSphString sError;
	CSphWriter tMetaWriter;
	tMetaWriter.SetBufferSize ( iBuffSize );
	if ( !tMetaWriter.OpenFile ( sNameMetaNew, sError ) )
	{
		sphWarning ( "failed to serialize meta: %s", sError.cstr() );
		return;
	}
	JsonEscapedBuilder sNewMeta;
	// rough size estimate: ~64 bytes per stored query, capped by 2/3 of the writer buffer
	sNewMeta.GrowEnough ( Min ( iBuffSize*2/3, dStored.GetLength() * 64 ) );
	sNewMeta.Rewind();
	sNewMeta.ObjectWBlock();
	// human-readable sugar
	sNewMeta.NamedString ( "meta_created_time_utc", sphCurrentUtcTime() );
	sNewMeta.NamedVal ( "meta_version", META_VERSION );
	sNewMeta.NamedVal ( "index_format_version", INDEX_FORMAT_VERSION );
	sNewMeta.NamedVal ( "schema", m_tSchema );
	sNewMeta.NamedVal ( "index_settings", m_tSettings );
	sNewMeta.Named ( "tokenizer_settings" );
	SaveTokenizerSettings ( sNewMeta, m_pTokenizer, m_tSettings.m_iEmbeddedLimit );
	sNewMeta.Named ( "dictionary_settings" );
	SaveDictionarySettings ( sNewMeta, m_pDict, false, m_tSettings.m_iEmbeddedLimit );
	// meta v.6
	CSphFieldFilterSettings tFieldFilterSettings;
	if ( m_pFieldFilter )
		m_pFieldFilter->GetSettings(tFieldFilterSettings);
	sNewMeta.NamedVal ( "field_filter_settings", tFieldFilterSettings );
	sNewMeta.NamedVal ( "tid", m_iTID );
	{
		sNewMeta.Named ( "pqs" );
		auto _ = sNewMeta.ArrayW();
		// stream queries out in chunks, so a huge query set doesn't bloat the builder
		for ( const StoredQuery_t * pQuery : dStored )
		{
			sNewMeta << *pQuery;
			// flush data on buffer grow
			if ( sNewMeta.GetLength()>iBuffSize/2 )
			{
				tMetaWriter.PutString ( (Str_t)sNewMeta );
				sNewMeta.Rewind();
			}
		}
	}
	sNewMeta.FinishBlocks();
	tMetaWriter.PutString ( (Str_t)sNewMeta );
	tMetaWriter.CloseFile();
	sNewMeta.Clear();
	// could be better to add check mode for PQ into indextool
	// rename
	if ( sph::rename ( sNameMetaNew.cstr(), sNameMeta.cstr() ) )
		sphWarning ( "failed to rename meta (src=%s, dst=%s, errno=%d, error=%s)", sNameMetaNew.cstr(), sNameMeta.cstr(), errno, strerrorm( errno ) );
	SaveMutableSettings ( m_tMutableSettings, GetFilename ( SPH_EXT_SETTINGS ) );
	// notify binlog after file saved
	Binlog::NotifyIndexFlush ( m_iTID, GetName(), (Binlog::Shutdown_e)bShutdown, Binlog::NoSave );
	m_iSavedTID = m_iTID;
	m_tmSaved = sphMicroTimer();
}
void PercolateIndex_c::SaveMeta ( bool bShutdown )
{
	// convenience overload: save the current (locked) snapshot of stored queries
	SaveMeta ( GetStored(), bShutdown );
}
bool PercolateIndex_c::Truncate ( CSphString & sError, Truncate_e eAction )
{
	// Drops all stored queries under the writer lock, persists the (now empty) meta,
	// and tells the binlog to release the obsolete data.
	ScWL_t wLock ( m_tLock );
	m_hQueries.Reset ( 256 );
	m_pQueries = new CSphVector<StoredQuerySharedPtr_t>;
	// update and save meta
	// current TID will be saved, so replay will properly skip preceding txns
	// FIXME!!! however it should be replicated to cluster maybe with TOI
	SaveMeta ( SharedPQSlice_t ( m_pQueries ) );
	// allow binlog to unlink now-redundant data files
	Binlog::NotifyIndexFlush ( m_iTID, GetName (), Binlog::NoShutdown, eAction==TRUNCATE ? Binlog::ForceSave : Binlog::DropTable );
	return true;
}
void PercolateMatchResult_t::Reset ()
{
	// flags back to their defaults (note: filters are reported by default)
	m_bGetDocs = false;
	m_bGetQuery = false;
	m_bGetFilters = true;
	m_bVerbose = false;

	// counters and timings
	m_iQueriesMatched = 0;
	m_iQueriesFailed = 0;
	m_iDocsMatched = 0;
	m_iEarlyOutQueries = 0;
	m_iTotalQueries = 0;
	m_iOnlyTerms = 0;
	m_tmTotal = 0;
	m_tmSetup = 0;

	// drop any collected payload
	m_sMessages.Clear ();
	m_dQueryDesc.Reset ( 0 );
	m_dDocs.Reset ( 0 );
	m_dQueryDT.Reset ( 0 );
}
// move ctor: default-construct, then take over rhs's state via Swap
PercolateMatchResult_t::PercolateMatchResult_t ( PercolateMatchResult_t&& rhs ) noexcept
	: PercolateMatchResult_t()
{
	Swap (rhs);
}
// member-wise swap; must cover every field of the struct (used by move ctor and operator=)
void PercolateMatchResult_t::Swap ( PercolateMatchResult_t& rhs ) noexcept
{
	::Swap ( m_bGetDocs, rhs.m_bGetDocs );
	::Swap ( m_bGetQuery, rhs.m_bGetQuery );
	::Swap ( m_bGetFilters, rhs.m_bGetFilters );
	::Swap ( m_iQueriesMatched, rhs.m_iQueriesMatched );
	::Swap ( m_iQueriesFailed, rhs.m_iQueriesFailed );
	::Swap ( m_iDocsMatched, rhs.m_iDocsMatched );
	::Swap ( m_tmTotal, rhs.m_tmTotal );
	::Swap ( m_bVerbose, rhs.m_bVerbose );
	::Swap ( m_iEarlyOutQueries, rhs.m_iEarlyOutQueries );
	::Swap ( m_iTotalQueries, rhs.m_iTotalQueries );
	::Swap ( m_iOnlyTerms, rhs.m_iOnlyTerms );
	::Swap ( m_tmSetup, rhs.m_tmSetup );
	::Swap ( m_sMessages, rhs.m_sMessages );
	::Swap ( m_dQueryDesc, rhs.m_dQueryDesc );
	::Swap ( m_dDocs, rhs.m_dDocs );
	::Swap ( m_dQueryDT, rhs.m_dQueryDT );
}
// unified copy/move assignment via the copy-and-swap idiom (rhs taken by value)
PercolateMatchResult_t&PercolateMatchResult_t::operator= ( PercolateMatchResult_t rhs ) noexcept
{
	Swap ( rhs );
	return *this;
}
void FixPercolateSchema ( CSphSchema & tSchema )
{
	// a percolate table must have at least one full-text field;
	// when none is configured, fall back to a default 'text' field
	if ( tSchema.GetFieldsCount() )
		return;

	tSchema.AddField ( CSphColumnInfo ( "text" ) );
}
bool PercolateIndex_c::IsSameSettings ( CSphReconfigureSettings & tSettings, CSphReconfigureSetup & tSetup, StrVec_t & dWarnings, CSphString & sError ) const
{
	// Compares incoming (config) settings against the live ones and fills tSetup
	// for a subsequent Reconfigure() call when they differ.
	tSetup.m_tSchema = tSettings.m_tSchema;
	FixPercolateSchema ( tSetup.m_tSchema );
	CSphString sTmp;
	// schema comparison result feeds into CreateReconfigure; the diff text itself is discarded
	bool bSameSchema = m_tSchema.CompareTo ( tSettings.m_tSchema, sTmp, false );
	return CreateReconfigure ( GetName(), IsStarDict ( m_pDict->GetSettings().m_bWordDict ), m_pFieldFilter.get(), m_tSettings, m_pTokenizer->GetSettingsFNV(),
		m_pDict->GetSettingsFNV(), m_pTokenizer->GetMaxCodepointLength(), GetMemLimit(),
		bSameSchema, tSettings, tSetup, dWarnings, sError );
}
bool PercolateIndex_c::Reconfigure ( CSphReconfigureSetup & tSetup )
{
	// Applies new settings/tokenizer/dict, then re-compiles every stored query
	// against them: current queries are dumped back into m_dLoadedQueries descriptors
	// and re-created by PostSetupUnl().
	m_tSchema = tSetup.m_tSchema;
	Setup ( tSetup.m_tIndex );
	SetTokenizer ( tSetup.m_pTokenizer );
	SetDictionary ( tSetup.m_pDict );
	SetFieldFilter ( std::move ( tSetup.m_pFieldFilter ) );
	m_iMaxCodepointLength = m_pTokenizer->GetMaxCodepointLength();
	SetupQueryTokenizer();
	ScWL_t wLock ( m_tLock ); // ensure nothing will be changed during reconfigure pass.
	// snapshot the source descriptors of all live queries for re-compilation
	m_dLoadedQueries.Reset ( m_pQueries->GetLength() );
	ARRAY_FOREACH ( i, m_dLoadedQueries )
	{
		StoredQueryDesc_t & tQuery = m_dLoadedQueries[i];
		const StoredQuery_t * pStored = (*m_pQueries) [i];
		tQuery.m_iQUID = pStored->m_iQUID;
		tQuery.m_sQuery = pStored->m_sQuery;
		tQuery.m_sTags = pStored->m_sTags;
		tQuery.m_dFilters.CopyFrom ( pStored->m_dFilters );
		tQuery.m_dFilterTree.CopyFrom ( pStored->m_dFilterTree );
	}
	// start from an empty live set; PostSetupUnl re-adds the queries one by one
	m_pQueries = new CSphVector<StoredQuerySharedPtr_t>;
	m_hQueries.Clear();
	// note: m_tLockHash and m_tLock is still held here.
	PostSetupUnl();
	return true;
}
bool PercolateIndex_c::IsFlushNeed() const
{
	// With binlog active, TID tracking tells whether anything changed since the
	// last save; without binlog the TID is not managed, so we can't rely on it
	// and must assume a flush could be needed.
	bool bUnchanged = Binlog::IsActive() && m_iTID<=m_iSavedTID;
	return !bUnchanged && !IsSaveDisabled();
}
void PercolateIndex_c::ForceRamFlush ( const char * szReason )
{
	// Persists the meta if there are unsaved changes; szReason is only used for logging.
	if ( !IsFlushNeed() )
		return;
	int64_t tmStart = sphMicroTimer();
	// bump m_iSavedTID up-front, remembering the previous value for the log line
	int64_t iWasTID = std::exchange ( m_iSavedTID, m_iTID );
	int64_t tmWas = m_tmSaved;
	SaveMeta ();
	int64_t tmNow = sphMicroTimer();
	int64_t tmAge = tmNow - tmWas;
	int64_t tmSave = tmNow - tmStart;
	sphInfo ( "percolate: table %s: saved ok (mode=%s, last TID=" INT64_FMT ", current TID=" INT64_FMT ", "
		"time delta=%d sec, took=%d.%03d sec)", GetName(), szReason, iWasTID, m_iTID, (int) (tmAge/1000000), (int)(tmSave/1000000), (int)((tmSave/1000)%1000) );
}
bool PercolateIndex_c::ForceDiskChunk()
{
	// PQ keeps everything in RAM + meta; "disk chunk" for percolate means a meta flush
	ForceRamFlush ( "forced" );
	return true;
}
// saving is disabled while at least one ProhibitSave()/LockFileState() is outstanding
bool PercolateIndex_c::IsSaveDisabled() const noexcept
{
	return GetNumOfLocks() > 0;
}
// number of outstanding save-prohibitions (see ProhibitSave/EnableSave)
int PercolateIndex_c::GetNumOfLocks () const noexcept
{
	return m_iDisabledCounter;
}
// push one save-prohibition; paired with EnableSave()
void PercolateIndex_c::ProhibitSave()
{
	++m_iDisabledCounter;
}
void PercolateIndex_c::EnableSave()
{
	// pop one save-prohibition; guarded so unbalanced calls can't drive the counter negative
	if ( IsSaveDisabled() )
		--m_iDisabledCounter;
}
void PercolateIndex_c::LockFileState ( StrVec_t & dFiles )
{
	// Flushes pending state to disk, then freezes saving and reports the on-disk file set.
	// Note: the prohibition taken here is expected to be released later via EnableSave().
	ForceRamFlush ( "forced" );
	++m_iDisabledCounter;
	// both 'files' and 'extra' are collected into the same vector on purpose
	GetIndexFiles ( dFiles, dFiles );
}
// non-owning view over the caller's filters / filter tree
PercolateQueryArgs_t::PercolateQueryArgs_t ( const VecTraits_T<CSphFilterSettings> & dFilters, const VecTraits_T<FilterTreeItem_t> & dFilterTree )
	: m_dFilters ( dFilters )
	, m_dFilterTree ( dFilterTree )
{}
// build args from a stored descriptor; string pointers borrow from tDesc,
// so tDesc must outlive these args
PercolateQueryArgs_t::PercolateQueryArgs_t ( const StoredQueryDesc_t & tDesc )
	: PercolateQueryArgs_t ( tDesc.m_dFilters, tDesc.m_dFilterTree )
{
	m_sQuery = tDesc.m_sQuery.cstr();
	m_sTags = tDesc.m_sTags.cstr();
	m_iQUID = tDesc.m_iQUID;
	m_bQL = tDesc.m_bQL;
}
// stuff for merging several results into one
// Cursor over one CPqResult used by the k-way merge in MergePqResults():
// tracks the current query-descriptor index and the docs read position.
struct PQMergeResultsIterator_t
{
	CPqResult * m_pResult = nullptr;
	int m_iIdx = 0;			// current position in m_dQueryDesc
	int m_iElems = 0;		// total descriptors in this result
	int* m_pDocs = nullptr;	// read cursor into the flat docs stream
	explicit PQMergeResultsIterator_t ( CPqResult * pMatch = nullptr )
		: m_pResult ( pMatch )
	{
		if ( !pMatch )
			return;
		m_iElems = pMatch->m_dResult.m_dQueryDesc.GetLength ();
		m_pDocs = pMatch->m_dResult.m_dDocs.begin();
	}
	inline PercolateQueryDesc & CurDesc () const
	{ return m_pResult->m_dResult.m_dQueryDesc[m_iIdx]; }
	inline int CurDt () const
	{ return m_pResult->m_dResult.m_dQueryDT[m_iIdx]; }
	// min-heap ordering: ascending by query UID
	static inline bool IsLess ( const PQMergeResultsIterator_t &a, const PQMergeResultsIterator_t &b )
	{
		return a.CurDesc().m_iQUID<b.CurDesc().m_iQUID;
	}
};
// Merges several per-chunk percolate results into one, ordered by query UID
// (k-way merge over a min-heap). When source results carry distinct docid
// spaces, docids are re-hashed into a single shared docid table.
void MergePqResults ( const VecTraits_T<CPqResult *> &dChunks, CPqResult &dRes, bool bSharded )
{
	if ( dChunks.IsEmpty () )
		return;
	// check if we have exactly one non-null and non-empty result
	if ( dChunks.GetLength ()==1 ) // short path for only 1 result.
	{
		// keep messages already accumulated in dRes, then adopt the single chunk wholesale
		auto dOldMsgs = std::move(dRes.m_dResult.m_sMessages);
		dRes = std::move ( *dChunks[0] );
		dRes.m_dResult.m_sMessages.AddStringsFrom ( dOldMsgs );
		return;
	}
	assert ( dChunks.GetLength ()>1 ); // simplest cases must be already processed;
	int iGotQueries = 0;
	int iGotDocids = 0;
	auto & dFinal = dRes.m_dResult; // shortcut
	CSphQueue<PQMergeResultsIterator_t, PQMergeResultsIterator_t> qMatches ( dChunks.GetLength () );
	// first pass: aggregate counters/flags and seed the heap with non-empty chunks
	for ( CPqResult * pChunk : dChunks )
	{
		assert ( pChunk ); // no nulls allowed
		auto &dResult = pChunk->m_dResult; /// shortcut
		// collect all warnings/errors/other things despite the num of collected results
		dFinal.m_sMessages.AddStringsFrom ( dResult.m_sMessages );
		// sharded chunks split one query set, so total is not additive across them
		dFinal.m_iTotalQueries = dResult.m_iTotalQueries + ( bSharded ? 0 : dFinal.m_iTotalQueries );
		dFinal.m_iEarlyOutQueries += dResult.m_iEarlyOutQueries;
		dFinal.m_iQueriesFailed += dResult.m_iQueriesFailed;
		dFinal.m_iDocsMatched += dResult.m_iDocsMatched;
		dFinal.m_tmTotal += dResult.m_tmTotal;
		dFinal.m_tmSetup += dResult.m_tmSetup;
		dFinal.m_bVerbose |= !dResult.m_dQueryDT.IsEmpty();
		dFinal.m_bGetDocs |= !dResult.m_dDocs.IsEmpty();
		dFinal.m_bGetQuery = dResult.m_bGetQuery;
		// we interest only in filled results
		if ( dResult.m_dQueryDesc.IsEmpty () )
			continue;
		qMatches.Push ( PQMergeResultsIterator_t ( pChunk ));
		iGotQueries += dResult.m_dQueryDesc.GetLength ();
		iGotDocids += pChunk->m_dDocids.GetLength();
	}
	if ( !iGotQueries )
		return;
	dFinal.m_iQueriesMatched = iGotQueries;
	// docs stream layout is [count, doc, doc, ...] per query, hence +iGotQueries
	int iDocSpace = iGotQueries + dFinal.m_iDocsMatched;
	dFinal.m_dQueryDesc.Reset ( iGotQueries );
	PercolateQueryDesc * pDst = dFinal.m_dQueryDesc.Begin ();
	int * pDt = nullptr;
	if ( dFinal.m_bVerbose )
	{
		dFinal.m_dQueryDT.Reset ( iGotQueries );
		pDt = dFinal.m_dQueryDT.begin ();
		assert ( pDt );
	}
	int * pDocs = nullptr;
	if ( dFinal.m_bGetDocs )
	{
		dFinal.m_dDocs.Reset ( iDocSpace );
		pDocs = dFinal.m_dDocs.begin ();
		assert ( pDocs );
	}
	// shared docid -> final-index map; seeded with one entry so that real docids
	// get indexes starting from 1 (slot 0 reserved) - NOTE(review): presumably
	// intentional sentinel, verify against consumers of m_dDocids
	OpenHashTable_T<int64_t, int> hDocids ( iGotDocids + 1 );
	bool bHasDocids = iGotDocids!=0;
	if ( bHasDocids )
		hDocids.Add ( iGotDocids, 0 );
	PQMergeResultsIterator_t tMin = qMatches.Root ();
	qMatches.Pop ();
	assert ( qMatches.GetLength ()>=0 ); // since case of only 1 resultset we already processed.
	// main merge loop: repeatedly emit the heap minimum, advance its cursor
	while ( true )
	{
		pDst->Swap ( tMin.CurDesc () );
		++pDst;
		if ( dFinal.m_bVerbose )
			*pDt++ = tMin.CurDt ();
		// docs copy
		if ( dFinal.m_bGetDocs )
		{
			int iDocCount = *tMin.m_pDocs;
			// in case of docids we collect them into common hash and rewrite results with hashed values
			if ( bHasDocids )
			{
				*pDocs = iDocCount;
				for ( int i=1; i<iDocCount+1; ++i )
					pDocs[i] = hDocids.FindOrAdd ( tMin.m_pResult->m_dDocids[tMin.m_pDocs[i]], (int)hDocids.GetLength () );
			} else
				memcpy ( pDocs, tMin.m_pDocs, sizeof(int) * (iDocCount + 1) );
			tMin.m_pDocs += iDocCount + 1;
			pDocs += iDocCount + 1;
		}
		++tMin.m_iIdx;
		if ( tMin.m_iIdx<tMin.m_iElems )
		{
			// if current root is better - change the head.
			if ( qMatches.GetLength () && !PQMergeResultsIterator_t::IsLess ( tMin, qMatches.Root () ) )
				qMatches.Push ( tMin );
			else
				continue;
		}
		if ( !qMatches.GetLength () )
			break;
		tMin = qMatches.Root ();
		qMatches.Pop ();
	}
	// repack hash into vec (if necessary)
	if ( bHasDocids )
	{
		dRes.m_dDocids.Reset ( hDocids.GetLength() );
		int64_t i = 0;
		int64_t iDocid = 0;
		int * pIndex = nullptr;
		while ( nullptr != ( pIndex = hDocids.Iterate ( &i, &iDocid ) ) )
			dRes.m_dDocids[*pIndex] = iDocid;
	}
}
void PercolateIndex_c::GetIndexFiles ( StrVec_t& dFiles, StrVec_t& dExtra, const FilenameBuilder_i* pParentFilenamebuilder ) const
{
	// Lists the index's own files (meta, mutable settings) into dFiles and
	// settings-referenced files (synonyms, wordforms, ...) into dExtra.
	CSphString sPath = GetFilename("meta");
	if ( sphIsReadable ( sPath ) )
		dFiles.Add ( sPath );
	if ( m_tMutableSettings.NeedSave() ) // should be file already after post-setup
	{
		sPath = GetFilename ( SPH_EXT_SETTINGS );
		if ( sphIsReadable ( sPath ) )
			dFiles.Add ( sPath );
	}
	// fall back to the globally-registered filename builder when the caller didn't supply one
	std::unique_ptr<FilenameBuilder_i> pFilenameBuilder { nullptr };
	if ( !pParentFilenamebuilder && GetIndexFilenameBuilder() )
	{
		pFilenameBuilder = GetIndexFilenameBuilder() ( GetName() );
		pParentFilenamebuilder = pFilenameBuilder.get();
	}
	GetSettingsFiles ( m_pTokenizer, m_pDict, GetSettings(), pParentFilenamebuilder, dExtra );
}
Bson_t PercolateIndex_c::ExplainQuery ( const CSphString & sQuery ) const
{
	// Builds an EXPLAIN-style query transformation plan. Uses a stub wordlist:
	// percolate has no persistent dictionary to expand against.
	WordlistStub_c tWordlist;
	bool bWordDict = m_pDict->GetSettings().m_bWordDict;
	// a clone tokenizer/dict pair is configured the same way as for real query compilation
	TokenizerRefPtr_c pQueryTokenizer = sphCloneAndSetupQueryTokenizer ( m_pTokenizer, IsStarDict ( bWordDict ), m_tSettings.m_bIndexExactWords, false );
	SetupExactTokenizer ( pQueryTokenizer );
	SetupStarTokenizer( pQueryTokenizer );
	ExplainQueryArgs_t tArgs;
	tArgs.m_szQuery = sQuery.cstr();
	tArgs.m_pSchema = &GetInternalSchema();
	tArgs.m_pDict = GetStatelessDict ( m_pDict );
	SetupStarDictV8 ( tArgs.m_pDict );
	SetupExactDict ( tArgs.m_pDict );
	if ( m_pFieldFilter )
		tArgs.m_pFieldFilter = m_pFieldFilter->Clone();
	tArgs.m_pSettings = &m_tSettings;
	tArgs.m_pWordlist = &tWordlist;
	tArgs.m_pQueryTokenizer = pQueryTokenizer;
	tArgs.m_iExpandKeywords = m_tMutableSettings.m_iExpandKeywords;
	tArgs.m_iExpansionLimit = m_iExpansionLimit;
	tArgs.m_bExpandPrefix = ( bWordDict && IsStarDict ( bWordDict ) );
	tArgs.m_pMorphFields = &m_tMorphFields;
	return Explain ( tArgs );
}
StoredQuerySharedPtrVecSharedPtr_t PercolateIndex_c::MakeClone () const
{
	// shallow clone: the new vector shares ownership of every stored query;
	// reserve the old limit up-front so the copy happens without reallocations
	StoredQuerySharedPtrVecSharedPtr_t pClone { new CSphVector<StoredQuerySharedPtr_t> };
	pClone->Reserve ( m_pQueries->GetLimit() );
	for ( const auto& tQuery : *m_pQueries )
		pClone->Add ( tQuery );
	return pClone;
}
// if m_pQueries has a room - do fast lockfree add. Otherwise allocate new one and do slower add.
void PercolateIndex_c::AddToStoredUnl ( StoredQuerySharedPtr_t tNew ) REQUIRES ( m_tLock )
{
	// Appends a new stored query under the writer lock. The QUID->index hash entry
	// is created first (it always points at the tail slot the query will occupy).
	m_hQueries.Add ( tNew->m_iQUID, m_pQueries->GetLength ());
	assert ( m_hQueries.Find ( tNew->m_iQUID ) && ( *m_hQueries.Find ( tNew->m_iQUID )==m_pQueries->GetLength ()));
	if ( m_pQueries->GetLength() < m_pQueries->GetLimit() ) // fast add possible
	{
		m_pQueries->Add ( std::move ( tNew ) );
		++m_iGeneration;
		return;
	}
	// no room - perform full relimit
	// (readers may still hold the old vector via SharedPQSlice_t, hence clone instead of grow-in-place)
	assert ( m_pQueries->GetLength () >= m_pQueries->GetLimit () );
	auto pNewVec = MakeClone ();
	pNewVec->Add ( std::move (tNew) );
	m_pQueries = pNewVec;
	++m_iGeneration;
}
// immutable, unchangeable
// snapshot of the current query set, tagged with the generation it was taken at
SharedPQSlice_t PercolateIndex_c::GetStoredUnl () const REQUIRES_SHARED ( m_tLock )
{
	return SharedPQSlice_t { m_pQueries, m_iGeneration };
}
// locking wrapper around GetStoredUnl()
SharedPQSlice_t PercolateIndex_c::GetStored () const EXCLUDES ( m_tLock )
{
	ScRL_t rLock ( m_tLock );
	return GetStoredUnl();
}
//////////////////////////////////////////////////////////////////////////
// Reads one stored-query descriptor in the pre-v7 binary layout.
// Fields are version-gated: QUID since v3, the QL flag since v4; v1 stored
// only the query string. Read order must match the legacy writer exactly.
void LoadStoredQueryV6 ( DWORD uVersion, StoredQueryDesc_t & tQuery, CSphReader & tReader )
{
	if ( uVersion>=3 )
		tQuery.m_iQUID = tReader.GetOffset();
	if ( uVersion>=4 )
		tQuery.m_bQL = ( tReader.GetDword()!=0 );
	tQuery.m_sQuery = tReader.GetString();
	if ( uVersion==1 )
		return;
	tQuery.m_sTags = tReader.GetString();
	tQuery.m_dFilters.Reset ( tReader.GetDword() );
	tQuery.m_dFilterTree.Reset ( tReader.GetDword() );
	for ( auto& tFilter : tQuery.m_dFilters )
	{
		tFilter.m_sAttrName = tReader.GetString();
		tFilter.m_bExclude = ( tReader.GetDword()!=0 );
		tFilter.m_bHasEqualMin = ( tReader.GetDword()!=0 );
		tFilter.m_bHasEqualMax = ( tReader.GetDword()!=0 );
		tFilter.m_eType = (ESphFilter)tReader.GetDword();
		tFilter.m_eMvaFunc = (ESphMvaFunc)tReader.GetDword ();
		// min/max are stored as raw 64-bit payloads
		tReader.GetBytes ( &tFilter.m_iMinValue, sizeof(tFilter.m_iMinValue) );
		tReader.GetBytes ( &tFilter.m_iMaxValue, sizeof(tFilter.m_iMaxValue) );
		tFilter.m_dValues.Resize ( tReader.GetDword() );
		tFilter.m_dStrings.Resize ( tReader.GetDword() );
		for ( auto& dValue : tFilter.m_dValues )
			tReader.GetBytes ( &dValue, sizeof ( dValue ) );
		for ( auto& dString : tFilter.m_dStrings )
			dString = tReader.GetString ();
	}
	for ( auto & tItem : tQuery.m_dFilterTree )
	{
		tItem.m_iLeft = tReader.GetDword();
		tItem.m_iRight = tReader.GetDword();
		tItem.m_iFilterItem = tReader.GetDword();
		tItem.m_bOr = ( tReader.GetDword()!=0 );
	}
}
// generic reader path: strings are zero-terminated on disk
template<typename READER>
inline CSphString GetZString ( READER & tReader )
{
	return tReader.GetZString();
}
// if implement GetZString in MemoryReader_c -> need to upgrade v of replication also
// MemoryReader_c (replication payloads) stores length-prefixed strings instead
template<>
inline CSphString GetZString ( MemoryReader_c & tReader )
{
	return tReader.GetString ();
}
// Reads one stored-query descriptor in the v7+ zipped layout.
// Field order must mirror SaveStoredQueryImpl() exactly.
template<typename READER>
void LoadStoredQuery ( DWORD uVersion, StoredQueryDesc_t & tQuery, READER & tReader )
{
	assert ( uVersion>=7 );
	tQuery.m_iQUID = tReader.UnzipOffset();
	tQuery.m_bQL = ( tReader.UnzipInt()!=0 );
	tQuery.m_sQuery = GetZString ( tReader );
	tQuery.m_sTags = GetZString ( tReader );
	tQuery.m_dFilters.Reset ( tReader.UnzipInt() );
	tQuery.m_dFilterTree.Reset ( tReader.UnzipInt() );
	for ( auto& tFilter : tQuery.m_dFilters )
	{
		tFilter.m_sAttrName = GetZString ( tReader );
		tFilter.m_bExclude = ( tReader.UnzipInt()!=0 );
		tFilter.m_bHasEqualMin = ( tReader.UnzipInt()!=0 );
		tFilter.m_bHasEqualMax = ( tReader.UnzipInt()!=0 );
		tFilter.m_bOpenLeft = ( tReader.UnzipInt()!=0 );
		tFilter.m_bOpenRight = ( tReader.UnzipInt()!=0 );
		tFilter.m_bIsNull = ( tReader.UnzipInt()!=0 );
		tFilter.m_eType = (ESphFilter)tReader.UnzipInt();
		tFilter.m_eMvaFunc = (ESphMvaFunc)tReader.UnzipInt ();
		tFilter.m_iMinValue = tReader.UnzipOffset();
		tFilter.m_iMaxValue = tReader.UnzipOffset();
		// values/strings are read into exact-sized temporaries, then adopted
		// by the destination vectors without copying
		int iValCount = tReader.UnzipInt();
		int iStrCount = tReader.UnzipInt();
		CSphFixedVector<SphAttr_t> dVals ( iValCount );
		CSphFixedVector<CSphString> dStrings ( iStrCount );
		for ( auto & dValue : dVals )
			dValue = tReader.UnzipOffset ();
		for ( auto & dString : dStrings )
			dString = GetZString ( tReader );
		tFilter.m_dValues.AdoptData ( dVals.LeakData(), iValCount, iValCount );
		tFilter.m_dStrings.AdoptData ( dStrings.LeakData(), iStrCount, iStrCount );
	}
	for ( auto& tItem : tQuery.m_dFilterTree )
	{
		tItem.m_iLeft = tReader.UnzipInt();
		tItem.m_iRight = tReader.UnzipInt();
		tItem.m_iFilterItem = tReader.UnzipInt();
		tItem.m_bOr = ( tReader.UnzipInt()!=0 );
	}
}
// generic writer path: strings go out zero-terminated (pairs with GetZString)
template<typename WRITER>
inline void PutZString ( const CSphString& sVal, WRITER & tWriter )
{
	tWriter.PutZString ( sVal );
}
// MemoryWriter_c (replication payloads) uses length-prefixed strings instead
template<>
inline void PutZString ( const CSphString & sVal, MemoryWriter_c & tWriter )
{
	tWriter.PutString ( sVal );
}
// Serializes one stored-query descriptor in the v7+ zipped layout.
// Field order must mirror LoadStoredQuery() exactly.
template<typename WRITER>
void SaveStoredQueryImpl ( const StoredQueryDesc_t & tQuery, WRITER & tWriter )
{
	tWriter.ZipOffset ( tQuery.m_iQUID );
	tWriter.ZipInt ( tQuery.m_bQL );
	PutZString ( tQuery.m_sQuery, tWriter );
	PutZString ( tQuery.m_sTags, tWriter );
	tWriter.ZipInt ( tQuery.m_dFilters.GetLength() );
	tWriter.ZipInt ( tQuery.m_dFilterTree.GetLength() );
	for ( const CSphFilterSettings & tFilter : tQuery.m_dFilters )
	{
		PutZString ( tFilter.m_sAttrName, tWriter );
		tWriter.ZipInt ( tFilter.m_bExclude );
		tWriter.ZipInt ( tFilter.m_bHasEqualMin );
		tWriter.ZipInt ( tFilter.m_bHasEqualMax );
		tWriter.ZipInt ( tFilter.m_bOpenLeft );
		tWriter.ZipInt ( tFilter.m_bOpenRight );
		tWriter.ZipInt ( tFilter.m_bIsNull );
		tWriter.ZipInt ( tFilter.m_eType );
		tWriter.ZipInt ( tFilter.m_eMvaFunc );
		tWriter.ZipOffset ( tFilter.m_iMinValue );
		tWriter.ZipOffset ( tFilter.m_iMaxValue );
		tWriter.ZipInt ( tFilter.m_dValues.GetLength() );
		tWriter.ZipInt ( tFilter.m_dStrings.GetLength() );
		for ( const auto & tValue: tFilter.m_dValues )
			tWriter.ZipOffset ( tValue );
		for ( const auto & tString: tFilter.m_dStrings )
			PutZString ( tString, tWriter );
	}
	for ( const FilterTreeItem_t & tItem: tQuery.m_dFilterTree )
	{
		tWriter.ZipInt ( tItem.m_iLeft );
		tWriter.ZipInt ( tItem.m_iRight );
		tWriter.ZipInt ( tItem.m_iFilterItem );
		tWriter.ZipInt ( tItem.m_bOr );
	}
}
// json-meta serializer for one filter-tree node; default field values are omitted
void operator<< ( JsonEscapedBuilder& tOut, const FilterTreeItem_t& tItem )
{
	auto _ = tOut.Object();
	tOut.NamedValNonDefault ( "left", tItem.m_iLeft, -1 );
	tOut.NamedValNonDefault ( "right", tItem.m_iRight, -1 );
	tOut.NamedValNonDefault ( "item", tItem.m_iFilterItem, -1 );
	tOut.NamedValNonDefault ( "or", tItem.m_bOr, false );
}
// json-meta serializer for one filter; default field values are omitted,
// min/max go out as float or int pair depending on the filter type
void operator<< ( JsonEscapedBuilder& tOut, const CSphFilterSettings& tFilter )
{
	auto _ = tOut.ObjectW();
	tOut.NamedValNonDefault ( "type", tFilter.m_eType, SPH_FILTER_VALUES );
	tOut.NamedStringNonEmpty ( "attr", tFilter.m_sAttrName );
	if ( tFilter.m_eType==SPH_FILTER_FLOATRANGE )
	{
		tOut.NamedVal( "fmin", tFilter.m_fMinValue );
		tOut.NamedVal ( "fmax", tFilter.m_fMaxValue );
	} else if ( tFilter.m_eType== SPH_FILTER_RANGE )
	{
		tOut.NamedValNonDefault ( "min", tFilter.m_iMinValue, (SphAttr_t)LLONG_MIN );
		tOut.NamedValNonDefault ( "max", tFilter.m_iMaxValue, (SphAttr_t)LLONG_MAX );
	}
	tOut.NamedValNonDefault ( "not", tFilter.m_bExclude, false );
	tOut.NamedValNonDefault ( "eq_min", tFilter.m_bHasEqualMin, true );
	tOut.NamedValNonDefault ( "eq_max", tFilter.m_bHasEqualMax, true );
	tOut.NamedValNonDefault ( "open_left", tFilter.m_bOpenLeft, false );
	tOut.NamedValNonDefault ( "open_right", tFilter.m_bOpenRight, false );
	tOut.NamedValNonDefault ( "is_null", tFilter.m_bIsNull, false );
	tOut.NamedValNonDefault ( "mva_func", tFilter.m_eMvaFunc, SPH_MVAFUNC_NONE );
	if ( !tFilter.m_dValues.IsEmpty() )
	{
		tOut.Named ( "values" );
		auto _ = tOut.ArrayW();
		for ( const auto& tValue : tFilter.m_dValues )
			tOut << tValue;
	}
	if ( !tFilter.m_dStrings.IsEmpty() )
	{
		tOut.Named ( "strings" );
		auto _ = tOut.ArrayW();
		for ( const auto& tValue : tFilter.m_dStrings )
			tOut.FixupSpacedAndAppendEscaped (tValue.cstr());
	}
}
/// serialize a stored (percolate) query descriptor as a JSON object;
/// LoadStoredQueryJson() is the exact inverse of this writer
void operator<< ( JsonEscapedBuilder& tOut, const StoredQueryDesc_t& tQuery )
{
	auto tRoot = tOut.ObjectW();
	tOut.NamedVal ( "quid", tQuery.m_iQUID );
	tOut.NamedValNonDefault ( "ql", tQuery.m_bQL, true );
	tOut.NamedStringNonEmpty ( "query", tQuery.m_sQuery );
	tOut.NamedStringNonEmpty ( "tags", tQuery.m_sTags );
	if ( !tQuery.m_dFilters.IsEmpty() )
	{
		// flat list of filters, each serialized by operator<<(CSphFilterSettings)
		tOut.Named ( "filters" );
		auto _ = tOut.ArrayW();
		for ( const auto& tFilter : tQuery.m_dFilters )
			tOut << tFilter;
	}
	if ( !tQuery.m_dFilterTree.IsEmpty() )
	{
		// boolean combination tree over the filters above (indexes reference m_dFilters)
		tOut.Named ( "filter_tree" );
		auto _ = tOut.ArrayW();
		for ( const auto& tItem : tQuery.m_dFilterTree )
			tOut << tItem;
	}
}
/// helper for loading a bson array into a vector of T:
/// pre-sizes a fixed vector from the node's element count, lets the caller fill
/// items sequentially via GetNextItem(), then hands the buffer off to the destination.
/// All operations are no-ops when the source node is absent (null).
template<typename T>
class JsonLoaderData_T
{
	CSphFixedVector<T> m_dVals { 0 };	// pre-allocated storage, one slot per bson array element
	int m_iItem { 0 };					// index of the next slot GetNextItem() hands out
	bson::Bson_c m_tParent;				// the source bson array node (may be null)

public:
	explicit JsonLoaderData_T ( bson::NodeHandle_t tNode )
		: m_tParent ( tNode )
	{
		if ( !bson::IsNullNode ( m_tParent ) )
			m_dVals.Reset ( m_tParent.CountValues() );
	}

	// returns the next unfilled slot; caller must not fetch more items than CountValues()
	T & GetNextItem ()
	{
		return m_dVals[m_iItem++];
	}

	// invoke fAction once per child node of the source array (no-op for a null node)
	void LoadItemJson ( bson::Action_f && fAction )
	{
		if ( bson::IsNullNode ( m_tParent ) )
			return;

		m_tParent.ForEach ( [&fAction] ( const bson::NodeHandle_t& tNode ) {
			fAction ( tNode );
		} );
	}

	// transfer ownership of the filled buffer into a growable vector (zero-copy)
	void MoveTo ( CSphVector<T> & dDst )
	{
		if ( bson::IsNullNode ( m_tParent ) )
			return;

		int iCount = m_dVals.GetLength();
		dDst.AdoptData ( m_dVals.LeakData(), iCount, iCount );
	}

	// swap the filled buffer into a fixed vector destination
	void SwapData ( CSphFixedVector<T> & dDst )
	{
		if ( bson::IsNullNode ( m_tParent ) )
			return;

		dDst.SwapData ( m_dVals );
	}
};
/// parse one filter-tree node back from bson;
/// exact counterpart of operator<<(FilterTreeItem_t), with matching defaults
void LoadStoredFilterTreeItemJson ( const bson::Bson_c & tNode, FilterTreeItem_t & tItem )
{
	tItem.m_iLeft = (int)bson::Int ( tNode.ChildByName ( "left" ), -1 );
	tItem.m_iRight = (int)bson::Int ( tNode.ChildByName ( "right" ), -1 );
	tItem.m_iFilterItem = (int)bson::Int ( tNode.ChildByName ( "item" ), -1 );
	tItem.m_bOr = bson::Bool ( tNode.ChildByName ( "or" ), false );
}
/// parse one filter definition back from bson;
/// exact counterpart of operator<<(CSphFilterSettings) - defaults here must match the writer's
void LoadStoredFilterItemJson ( const bson::Bson_c & tNode, CSphFilterSettings & tFilter )
{
	tFilter.m_eType = (ESphFilter)bson::Int ( tNode.ChildByName ( "type" ), SPH_FILTER_VALUES );
	tFilter.m_sAttrName = bson::String ( tNode.ChildByName ( "attr" ) );

	// range bounds are stored differently for float vs integer ranges
	switch ( tFilter.m_eType )
	{
	case SPH_FILTER_FLOATRANGE:
		tFilter.m_fMinValue = (float)bson::Double ( tNode.ChildByName ( "fmin" ) );
		tFilter.m_fMaxValue = (float)bson::Double ( tNode.ChildByName ( "fmax" ) );
		break;
	case SPH_FILTER_RANGE:
		tFilter.m_iMinValue = bson::Int ( tNode.ChildByName ( "min" ), (SphAttr_t)LLONG_MIN );
		tFilter.m_iMaxValue = bson::Int ( tNode.ChildByName ( "max" ), (SphAttr_t)LLONG_MAX );
		break;
	default:
		break;
	}

	tFilter.m_bExclude = bson::Bool ( tNode.ChildByName ( "not" ), false );
	tFilter.m_bHasEqualMin = bson::Bool ( tNode.ChildByName ( "eq_min" ), true );
	tFilter.m_bHasEqualMax = bson::Bool ( tNode.ChildByName ( "eq_max" ), true );
	tFilter.m_bOpenLeft = bson::Bool ( tNode.ChildByName ( "open_left" ), false );
	tFilter.m_bOpenRight = bson::Bool ( tNode.ChildByName ( "open_right" ), false );
	tFilter.m_bIsNull = bson::Bool ( tNode.ChildByName ( "is_null" ), false );
	tFilter.m_eMvaFunc = (ESphMvaFunc)bson::Int ( tNode.ChildByName ( "mva_func" ), SPH_MVAFUNC_NONE );

	// integer values array
	{
		JsonLoaderData_T<SphAttr_t> tValues ( tNode.ChildByName ( "values" ) );
		tValues.LoadItemJson ( [&tValues] ( const bson::NodeHandle_t & tChild ) { tValues.GetNextItem() = bson::Int ( tChild ); } );
		tValues.MoveTo ( tFilter.m_dValues );
	}

	// string values array
	{
		JsonLoaderData_T<CSphString> tStrings ( tNode.ChildByName ( "strings" ) );
		tStrings.LoadItemJson ( [&tStrings] ( const bson::NodeHandle_t & tChild ) { tStrings.GetNextItem() = bson::String ( tChild ); } );
		tStrings.MoveTo ( tFilter.m_dStrings );
	}
}
/// parse a stored (percolate) query descriptor back from bson;
/// exact counterpart of operator<<(StoredQueryDesc_t)
void LoadStoredQueryJson ( StoredQueryDesc_t & tQuery, const bson::Bson_c & tNode )
{
	assert ( tNode.IsAssoc() );

	tQuery.m_iQUID = bson::Int ( tNode.ChildByName ( "quid" ) );
	tQuery.m_bQL = bson::Bool ( tNode.ChildByName ( "ql" ), true );
	tQuery.m_sQuery = bson::String ( tNode.ChildByName ( "query" ) );
	tQuery.m_sTags = bson::String ( tNode.ChildByName ( "tags" ) );

	// flat filter list
	{
		JsonLoaderData_T<CSphFilterSettings> tFilters ( tNode.ChildByName ( "filters" ) );
		tFilters.LoadItemJson ( [&tFilters] ( const bson::NodeHandle_t & tChild ) {
			LoadStoredFilterItemJson ( tChild, tFilters.GetNextItem() );
		} );
		tFilters.SwapData ( tQuery.m_dFilters );
	}

	// boolean combination tree over the filters
	{
		JsonLoaderData_T<FilterTreeItem_t> tTree ( tNode.ChildByName ( "filter_tree" ) );
		tTree.LoadItemJson ( [&tTree] ( const bson::NodeHandle_t & tChild ) {
			LoadStoredFilterTreeItemJson ( tChild, tTree.GetNextItem() );
		} );
		tTree.SwapData ( tQuery.m_dFilterTree );
	}
}
/// deserialize a stored query from a raw memory blob;
/// in-memory blobs are always written with the latest meta version
void LoadStoredQuery ( ByteBlob_t tData, StoredQueryDesc_t& tQuery )
{
	MemoryReader_c tReader ( tData );
	LoadStoredQuery ( PQ_META_VERSION_MAX, tQuery, tReader );
}
/// deserialize a stored query from a file reader, honoring the on-disk format version
void LoadStoredQuery ( DWORD uVersion, StoredQueryDesc_t & tQuery, CSphReader & tReader )
{
	LoadStoredQuery<CSphReader> ( uVersion, tQuery, tReader );
}
/// serialize a stored query into a memory buffer
void SaveStoredQuery ( const StoredQueryDesc_t & tQuery, MemoryWriter_c& tWriter )
{
	SaveStoredQueryImpl<MemoryWriter_c> ( tQuery, tWriter );
}
/// serialize a stored query into a file writer
void SaveStoredQuery ( const StoredQueryDesc_t & tQuery, CSphWriter & tWriter )
{
	SaveStoredQueryImpl<CSphWriter> ( tQuery, tWriter );
}
/// read a serialized percolate delete request:
/// a zipped count, then that many zipped query ids, then the tags string
template<typename READER>
void LoadDeleteQuery_T ( CSphVector<int64_t> & dQueries, CSphString & sTags, READER & tReader )
{
	const int iCount = tReader.UnzipInt();
	dQueries.Resize ( iCount );
	for ( int i = 0; i<iCount; ++i )
		dQueries[i] = tReader.UnzipOffset();
	sTags = tReader.GetString();
}
/// deserialize a delete request from a raw memory blob
void LoadDeleteQuery ( ByteBlob_t tData, CSphVector<int64_t> & dQueries, CSphString & sTags )
{
	MemoryReader_c tReader { tData };
	LoadDeleteQuery_T ( dQueries, sTags, tReader );
}
/// deserialize a delete request from a file reader
void LoadDeleteQuery ( CSphVector<int64_t> & dQueries, CSphString & sTags, CSphReader & tReader )
{
	LoadDeleteQuery_T ( dQueries, sTags, tReader );
}
template<typename WRITER>
void SaveDeleteQuery_T ( const VecTraits_T<int64_t>& dQueries, const char* sTags, WRITER& tWriter )
{
tWriter.ZipInt ( dQueries.GetLength () );
for ( int64_t iQuery : dQueries )
tWriter.ZipOffset ( iQuery );
tWriter.PutString ( sTags );
}
/// serialize a delete request into a memory buffer
void SaveDeleteQuery ( const VecTraits_T<int64_t>& dQueries, const char* sTags, MemoryWriter_c& tWriter )
{
	SaveDeleteQuery_T ( dQueries, sTags, tWriter );
}
/// serialize a delete request into a file writer
void SaveDeleteQuery ( const VecTraits_T<int64_t> & dQueries, const char * sTags, CSphWriter & tWriter )
{
	SaveDeleteQuery_T ( dQueries, sTags, tWriter );
}
| 119,673
|
C++
|
.cpp
| 3,115
| 35.773355
| 232
| 0.714977
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,931
|
binlog.cpp
|
manticoresoftware_manticoresearch/src/binlog.cpp
|
//
// Copyright (c) 2021-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "binlog.h"
#include "sphinxrt.h"
#include "memio.h"
#include "coroutine.h"
// in-memory buffer size that triggers an early write-out of pending transactions
static constexpr int BINLOG_WRITE_BUFFER = 256*1024;
// period of the background binlog flush task
static constexpr int64_t BINLOG_AUTO_FLUSH = 1000000; // 1 sec
static constexpr DWORD BINLOG_HEADER_MAGIC_SPBL = 0x4c425053; /// magic 'SPBL' header that marks binlog file
static constexpr DWORD BLOP_MAGIC_TXN_ = 0x214e5854; /// magic 'TXN!' header that marks binlog entry
static constexpr DWORD BINLOG_META_MAGIC_SPLI = 0x494c5053; /// magic 'SPLI' header that marks binlog meta
// binlog on-disk format version history:
// up to 12: PQ_ADD_DELETE added
// 13 : changed txn format; now stores total documents also
// 14 : ??
// 15 : big refactor: remove external ops; ops is 1 byte (unzipped); + internal ops, + size for ADD_TXN, - index ID
constexpr unsigned int BINLOG_VERSION = 15;
/// Bin Log Operation
/// one-byte opcode prefixing each entry in a binlog file
enum Blop_e : BYTE
{
	ADD_INDEX = 1,	// declares an index name and assigns it an in-file id
	ADD_TXN = 2,	// a logged transaction for some previously declared index
	ADD_CACHE = 3,	// per-index TID summary, written at the end of a file
	TOTAL
};
//////////////////////////////////////////////////////////////////////////
// BINLOG
//////////////////////////////////////////////////////////////////////////
/// binlog file view of the index
/// everything that a given log file needs to know about an index
struct BinlogIndexInfo_t
{
	CSphString	m_sName;				///< index name
	int64_t		m_iMinTID = INT64_MAX;	///< min TID logged by this file
	int64_t		m_iMaxTID = 0;			///< max TID logged by this file
	int64_t		m_iFlushedTID = 0;		///< last flushed TID
};
/// extends the per-file index info with replay-time state (only used while replaying)
struct BinlogIndexReplayInfo_t : BinlogIndexInfo_t
{
	CSphIndex * m_pIndex = nullptr;	///< associated index (might be NULL if we don't serve it anymore!)
	int64_t m_iPreReplayTID = 0;	///< index TID at the beginning of this file replay
};
/// binlog file descriptor
/// file id (aka extension), plus a list of associated index infos
struct BinlogFileDesc_t
{
	int m_iExt = 0;	// file id, used as the binlog filename extension
	CSphVector<BinlogIndexInfo_t> m_dIndexInfos;	// indexes with txns logged in this file
};
/// replay-time counterpart of BinlogFileDesc_t (carries per-index replay state)
struct BinlogReplayFileDesc_t
{
	int m_iExt;
	CSphVector<BinlogIndexReplayInfo_t> m_dIndexInfos;
};
/// binlog file writer: accumulates transactions in a memory buffer (m_dBuf) and
/// writes them out in batches; tracks two watermarks - last written file position
/// and last fsynced position - so flushing and syncing can be decided independently
class BinlogWriter_c final : public MemoryWriter2_c
{
public:
			BinlogWriter_c() : MemoryWriter2_c(m_dBuf) {}
			~BinlogWriter_c() final { CloseFile(); }

	bool	Write ( bool bRemoveUnsuccessful = true );	// flush buffer to disk; see impl for partial-write handling
	bool	WriteAndFsync();
	bool	Fsync ( int iFD = -1 );						// fsync own file, or the given fd
	int64_t	GetFilePos() const noexcept { return m_iLastFilePos; }

	bool	OpenFile ( const CSphString & sFile, CSphString & sError );
	void	CloseFile();
	int		LeakFD();									// detach the fd; caller takes ownership
	int		GetFD () const noexcept;
	CSphString GetFilename() const noexcept;

	const CSphString & GetError() const { return m_sError; }
	bool	HasUnwrittenData () const { return !m_dBuf.IsEmpty(); }
	bool	HasUnsyncedData () const { return m_iLastFsyncPos.load(std::memory_order_relaxed)!=m_iLastFilePos; }
	bool	IsOpen() const noexcept { return ( m_tFile.GetFD()!=-1 ); }

private:
	friend class BinlogTransactionGuard_c;
	friend class TransactionSizeGuard_c;
	// transaction framing is driven by the guard classes above
	void	StartTransaction ();
	void	EndTransaction ( int64_t iStartTransaction, bool bWriteOnOverflow );
	inline int64_t GetBuffPos () const noexcept { return m_dBuf.GetLengthBytes64 (); }

private:
	CSphAutofile		m_tFile;
	CSphVector<BYTE>	m_dBuf;						// pending (not yet written) data
	CSphString			m_sError;
	int64_t				m_iLastFilePos = 0;			// bytes successfully written to the file
	std::atomic<int64_t> m_iLastFsyncPos { 0 };		// bytes known to be fsynced
	int					m_iLastTransactionStartPos = 0;	// buffer offset of the last txn, for rollback on write failure
};
/// binlog file reader that maintains a running CRC32 over everything it reads,
/// so each transaction's trailing checksum can be validated via CheckCrc()
class BinlogReader_c final : public CSphAutoreader
{
public:
	void	ResetCrc ();
	bool	CheckCrc ( const char * sOp, const char * sIndexName, int64_t iTid, int64_t iTxnPos );

private:
	DWORD	m_uCRC = 0;						// CRC accumulated so far
	int		m_iLastCrcPos = m_iBuffPos;		// buffer position already folded into m_uCRC
	void	UpdateCache () final;			// hashes pending bytes before the buffer is refilled
	void	HashCollected ();
};
using namespace Binlog;
class Binlog_c;

// what to do with buffered data after each commit (see SingleBinlog_c::CheckDoFlush)
enum class FlushAction_e : int
{
	ACTION_NONE = 0,			// keep in memory; the periodic flush task writes it out
	ACTION_FSYNC = 1,			// write and fsync on every commit
	ACTION_WRITE = 2,			// write (no fsync) on every commit
	ACTION_FSYNC_ON_CLOSE = 3,	// write on commit; fsync deferred until the file is closed
};
// outcome of probing/replaying a single binlog file
enum class BinlogFileState_e
{
	OK,
	ERROR_NON_READABLE, // if can't open binlog file
	ERROR_EMPTY_0, // if file is empty (0 bytes)
	ERROR_WRONG_FILE, // file is broken
	ERROR_EMPTY_8, // file is valid, but no txns
	ERROR_ABANDONED // file is valid, but only cache, no txns
};
using BinlogMutex_t = Threads::Coro::Mutex_c;
using ScopedBinlogMutex_t = Threads::ScopedCoroMutex_t;

/// one binlog stream: a writer plus the list of log files it produced.
/// Binlog_c keeps one of these per index (or a single shared one in common-binlog mode).
/// m_tWriteAccess serializes all writer operations; m_tLogFilesAccess guards the file list.
class SingleBinlog_c final
{
	Binlog_c * m_pOwner;	// back-pointer to the managing Binlog_c (nulled on Deinit)
	mutable BinlogMutex_t m_tWriteAccess; // serialize ops
	BinlogWriter_c m_tWriter GUARDED_BY ( m_tWriteAccess );
	mutable Threads::Coro::RWLock_c m_tLogFilesAccess;
	CSphVector<BinlogFileDesc_t> m_dLogFiles GUARDED_BY ( m_tLogFilesAccess ); // active log files
	Threads::Coro::Waitable_T<int> m_tFlushRunning { 0 };	// count of in-flight background fsyncs

public:
	NONCOPYMOVABLE( SingleBinlog_c );
	explicit SingleBinlog_c ( Binlog_c* pOwner ) noexcept
		: m_pOwner { pOwner } {}
	~SingleBinlog_c ();
	void Deinit();

	// returns { fd to fsync (-1 if nothing to do), this }; pair consumed by DoFsync()
	std::pair<int, SingleBinlog_c*> DoFlush ( FlushAction_e eAction ) EXCLUDES ( m_tWriteAccess );
	void DoFsync ( int iFd );
	void CollectBinlogFiles ( CSphVector<int>& tOutput ) const noexcept EXCLUDES ( m_tLogFilesAccess );
	bool BinlogCommit ( int64_t * pTID, const char* szIndexName, FnWriteCommit fnSaver, CSphString & sError ) EXCLUDES ( m_tWriteAccess );
	bool NotifyIndexFlush ( int64_t iFlushedTID, const char * szIndexName, Shutdown_e eShutdown, ForceSave_e eForceSave ) EXCLUDES ( m_tWriteAccess );
	void AdoptIndex ( int iExt, const BinlogIndexInfo_t & tIdx ) REQUIRES ( m_tLogFilesAccess );
	void AdoptFile ( int iExt ) REQUIRES ( m_tLogFilesAccess );
	void OpenNewLog ( BinlogFileState_e eState = BinlogFileState_e::OK ) REQUIRES ( m_tWriteAccess );
	int64_t LastTidFor ( const CSphString & sIndex ) const noexcept EXCLUDES ( m_tLogFilesAccess );
	void LockWriter () ACQUIRE ( m_tWriteAccess );
	void UnlockWriter () RELEASE ( m_tWriteAccess );

private:
	void RemoveLastEmptyLog () EXCLUDES ( m_tLogFilesAccess );
	int GetWriteIndexID ( const char * szIndexName, int64_t iTID ) EXCLUDES ( m_tLogFilesAccess ) REQUIRES ( m_tWriteAccess );
	void DoCacheWrite () EXCLUDES ( m_tLogFilesAccess ) REQUIRES ( m_tWriteAccess );
	void CheckDoRestart () REQUIRES ( m_tWriteAccess );
	bool CheckDoFlush () REQUIRES ( m_tWriteAccess );
	void SaveMeta () EXCLUDES ( m_tLogFilesAccess ) ;
	void FixNofFiles ( int iFiles );
};

using SingleBinlogPtr = std::unique_ptr<SingleBinlog_c>;
/// the binlog manager: owns one SingleBinlog_c per index (or a single common one),
/// the meta file listing active binlog files, and the replay machinery used at startup
class Binlog_c : public ISphNoncopyable
{
	friend class SingleBinlog_c;

public:
	~Binlog_c();

	void	NotifyIndexFlush ( int64_t iTID, const char * szIndexName, Shutdown_e eShutdown, ForceSave_e eForceSave );
	bool	BinlogCommit ( int64_t * pTID, const char * szIndexName, FnWriteCommit fnSaver, CSphString & sError );

	void	Configure ( const CSphConfigSection & hSearchd, DWORD uReplayFlags );
	void	SetCommon ( bool bCommonBinlog );
	void	Replay ( const SmallStringHash_T<CSphIndex*> & hIndexes, ProgressCallbackSimple_t * pfnProgressCallback );

	bool	IsActive () const { return !m_bDisabled; }
	bool	MockDisabled ( bool bNewVal );
	void	CheckAndSetPath ( CSphString sBinlogPath );
	bool	IsFlushingEnabled() const;
	void	DoFlush (); // invoked by task binlog flush, every BINLOG_AUTO_FLUSH (1 sec)
	int64_t	NextFlushingTime() const noexcept;
	inline CSphString GetLogPath() const noexcept { return m_sLogPath; }
	int64_t LastTidFor ( const CSphString & sIndex ) const noexcept EXCLUDES ( m_tHashAccess );

private:
	std::atomic<int64_t>	m_iLastFlushed {0};		// timestamp of the last periodic flush
	int64_t					m_iFlushPeriod = BINLOG_AUTO_FLUSH;
	FlushAction_e			m_eFlushFlavour = FlushAction_e::ACTION_NONE;

	mutable Threads::Coro::RWLock_c m_tHashAccess;
	SmallStringHash_T<SingleBinlogPtr> m_hBinlogs GUARDED_BY ( m_tHashAccess );	// index name -> its binlog stream

	int						m_iLockFD = -1;			// binlog dir lock file
	CSphString				m_sLogPath;

	IntVec_t				m_dSavedFiles;
	mutable Threads::Coro::RWLock_c m_tCurrentFilesAccess;
	IntVec_t				m_dCurrentFiles GUARDED_BY ( m_tCurrentFilesAccess );	// file set as last written to meta

	bool					m_bReplayMode = false; // replay mode indicator
	bool					m_bDisabled = true;
	DWORD					m_uReplayFlags = 0;
	bool					m_bWrongVersion = false;
	bool					m_bCommonBinlog = false; // per-index binlog(false), or old-way common binlog(true)

	int						m_iRestartSize = 268435456; // binlog size restart threshold, 256M; searchd.binlog_max_log_size
	int						m_iBinlogFileDigits = 4; // how many digits use for naming binlog files
	CSphString				m_sBinlogFileNameTemplate;
	std::atomic<int>		m_iNextBinlog { 0 };	// next binlog file ext to hand out
	std::atomic<int>		m_iNumFiles;

private:
	SingleBinlog_c * GetWriteIndexBinlog ( const char* szIndexName, bool bOpenNewLog=true ) EXCLUDES ( m_tHashAccess );
	SingleBinlog_c * GetSingleWriteIndexBinlog ( bool bOpenNewLog ) EXCLUDES ( m_tHashAccess );
	SingleBinlog_c * GetFlushIndexBinlog ( const char * szIndexName ) REQUIRES ( m_tHashAccess );
	void	LoadMeta ();
	enum SaveMeta_e : bool { eNoForce, eForce };
	void	DoSaveMeta ( IntVec_t dFiles, SaveMeta_e eForce ) EXCLUDES ( m_tCurrentFilesAccess );
	void	SaveMeta () EXCLUDES ( m_tHashAccess ) EXCLUDES ( m_tCurrentFilesAccess );
	void	SaveMetaUnlock ( SaveMeta_e eForce = eNoForce ) REQUIRES_SHARED ( m_tHashAccess ) EXCLUDES ( m_tCurrentFilesAccess );
	IntVec_t CollectBinlogFiles() EXCLUDES ( m_tHashAccess );
	IntVec_t CollectBinlogFilesUnlock() REQUIRES_SHARED ( m_tHashAccess );
	bool	CompareCurrentFiles ( const IntVec_t & dFiles ) EXCLUDES ( m_tCurrentFilesAccess );
	void	LockBinlog ();
	void	UnlockBinlog ();
	void	RemoveFile ( int iExt );

	BinlogFileState_e ReplayBinlog ( BinlogReplayFileDesc_t & tLog, const SmallStringHash_T<CSphIndex*> & hIndexes );
	bool	ReplayTxn ( const BinlogReplayFileDesc_t & tLog, BinlogReader_c & tReader ) const;
	bool	ReplayIndexAdd ( BinlogReplayFileDesc_t & tLog, const SmallStringHash_T<CSphIndex*> & hIndexes, BinlogReader_c & tReader ) const;
	bool	ReplayCacheAdd ( const BinlogReplayFileDesc_t & tLog, DWORD uVersion, BinlogReader_c & tReader ) const;
	bool	IsBinlogWritable () const noexcept;
	bool	PerformChecks ( const char * szOp, BinlogIndexInfo_t & tIndex, int64_t iTID, int64_t iTxnPos, BinlogReader_c & tReader ) const;
	void	Log ( DWORD uFlag, const char * sTemplate, ... ) const;
	int		ReplayIndexID ( CSphReader & tReader, const BinlogReplayFileDesc_t & tLog ) const;
	CSphString MakeBinlogName ( int iExt ) const noexcept;
	void	MakeBinlogFilenameTemplate () noexcept;
	int		NextBinlogExt();
	void	FixNofFiles ( int iFiles );
};

// the process-wide binlog manager instance
std::unique_ptr<Binlog_c> g_pRtBinlog;
/// human-readable name of a logged transaction kind (for warnings/log messages).
/// NOTE: the switch intentionally has no 'default' so -Wswitch keeps flagging new
/// Txn_e values; the trailing return fixes falling off the end of a non-void
/// function (UB, and a hard error under -Werror=return-type) for unmapped values
inline const char * SzTxnName ( Txn_e eTxn )
{
	switch ( eTxn )
	{
	case UPDATE_ATTRS: return "update";
	case COMMIT: return "commit";
	case PQ_ADD_DELETE: return "pq_add_delete";
	}
	return "unknown";
}
//////////////////////////////////////////////////////////////////////////
// BinlogTransactionGuard_c
//////////////////////////////////////////////////////////////////////////
/// RAII framing for one binlog transaction: the ctor emits the 'TXN!' magic,
/// the dtor appends the CRC over the payload (everything after the magic) and
/// optionally triggers a buffer write-out on overflow
class BinlogTransactionGuard_c final
{
public:
	NONCOPYMOVABLE ( BinlogTransactionGuard_c );

	explicit BinlogTransactionGuard_c ( BinlogWriter_c & tWriter, bool bWriteOnOverflow = true )
		: m_tWriter ( tWriter )
		, m_bWriteOnOverflow ( bWriteOnOverflow )
		, m_iStartTransaction ( tWriter.GetBuffPos() )	// remember where this txn starts in the buffer
	{
		m_tWriter.StartTransaction();
	}

	~BinlogTransactionGuard_c()
	{
		// CRC covers the payload only, hence the +sizeof(magic) offset
		m_tWriter.EndTransaction ( m_iStartTransaction+sizeof ( BLOP_MAGIC_TXN_ ), m_bWriteOnOverflow );
	}

private:
	BinlogWriter_c &	m_tWriter;
	bool				m_bWriteOnOverflow;
	int64_t				m_iStartTransaction;
};
/// RAII size prefix for a txn payload: the ctor reserves a DWORD placeholder,
/// the dtor back-patches it with the number of payload bytes written in between.
/// NOTE: the size is truncated to DWORD, so a single payload must stay under 4GB
class TransactionSizeGuard_c final
{
public:
	NONCOPYMOVABLE ( TransactionSizeGuard_c );

	explicit TransactionSizeGuard_c ( BinlogWriter_c & tWriter )
		: m_tWriter ( tWriter )
		, m_iStartTransaction ( tWriter.GetBuffPos () )
	{
		m_tWriter.PutDword ( 0 ); // placeholder for size
	}

	~TransactionSizeGuard_c ()
	{
		// patch the placeholder in the still-in-memory buffer
		DWORD uSize = m_tWriter.GetBuffPos ()-m_iStartTransaction-sizeof ( DWORD );
		memcpy ( &m_tWriter.m_dBuf[m_iStartTransaction], &uSize, sizeof ( DWORD ) );
	}

private:
	BinlogWriter_c &	m_tWriter;
	int64_t				m_iStartTransaction;
};
//////////////////////////////////////////////////////////////////////////
// BinlogWriter_c
//////////////////////////////////////////////////////////////////////////
/// flush the memory buffer to the binlog file.
/// on failure, truncates the file back to the last fully-written position so no
/// partial transaction is left on disk; optionally drops the last transaction
/// from the buffer as well (callers that can't report failure ask to keep it)
bool BinlogWriter_c::Write ( bool bRemoveUnsuccessful )
{
	if ( m_dBuf.IsEmpty() )
		return true;

	if ( !WriteNonThrottled ( m_tFile.GetFD(), m_dBuf.Begin(), m_dBuf.GetLength(), m_tFile.GetFilename(), m_sError ) )
	{
		// if we got a partial write, clamp the file at the end of last written transaction
		sphSeek ( m_tFile.GetFD(), m_iLastFilePos, SEEK_SET );
		sphTruncate ( m_tFile.GetFD() );

		if ( bRemoveUnsuccessful )
		{
			// remove last transaction from memory buffer
			// other unwritten transactions may still be in the buffer, but we can't tell the daemon that they failed at this point,
			// so we remove only the last one
			m_dBuf.Resize ( m_iLastTransactionStartPos );
		}

		return false;
	}

	// success: advance the written watermark and reset the buffer
	m_iLastFilePos += m_dBuf.GetLength ();
	m_iLastTransactionStartPos = 0;
	m_dBuf.Resize(0);
	return true;
}
#if _WIN32
int fsync ( int iFD )
{
// map fd to handle
HANDLE h = (HANDLE) _get_osfhandle ( iFD );
if ( h==INVALID_HANDLE_VALUE )
{
errno = EBADF;
return -1;
}
// do flush
if ( FlushFileBuffers(h) )
return 0;
// error handling
errno = EIO;
if ( GetLastError()==ERROR_INVALID_HANDLE )
errno = EINVAL;
return -1;
}
#endif
/// fsync the given fd (or our own file when iFD==-1);
/// on success, advance the fsynced-position watermark up to the written position.
/// fix: the watermark comparison used an implicit seq_cst atomic load while every
/// other access to m_iLastFsyncPos (HasUnsyncedData and both stores) is relaxed -
/// made the load explicitly relaxed for consistency; writer state is already
/// serialized by the owning SingleBinlog_c's writer mutex
bool BinlogWriter_c::Fsync ( int iFD )
{
	if ( iFD==-1 )
		iFD = m_tFile.GetFD ();

	if ( fsync ( iFD )!=0 )
	{
		m_sError.SetSprintf ( "failed to sync %s: %s", m_tFile.GetFilename (), strerrorm ( errno ) );
		return false;
	}

	if ( m_iLastFsyncPos.load ( std::memory_order_relaxed )<m_iLastFilePos )
		m_iLastFsyncPos.store ( m_iLastFilePos, std::memory_order_relaxed );
	return true;
}
// flush buffered data (if any) and fsync the file; returns false on write failure
bool BinlogWriter_c::WriteAndFsync()
{
	if ( HasUnwrittenData() )
	{
		if ( !Write() )
			return false;
	}

	// nothing new reached the file since the last fsync - we're done
	return !HasUnsyncedData() || Fsync();
}
// (re)open the binlog file for writing; resets the written-position watermark
bool BinlogWriter_c::OpenFile ( const CSphString & sFile, CSphString & sError )
{
	m_iLastFilePos = 0;
	const int iFD = m_tFile.Open ( sFile, SPH_O_NEW, sError );
	return iFD>=0;
}
// flush any pending data, close the file, and rewind both watermarks
void BinlogWriter_c::CloseFile()
{
	if ( HasUnwrittenData() )
		Write();

	m_tFile.Close();
	m_iLastFilePos = 0;
	m_iLastFsyncPos.store ( 0, std::memory_order_relaxed );
}
// flush pending data, rewind watermarks, and detach the underlying fd;
// the caller becomes responsible for closing it
int BinlogWriter_c::LeakFD()
{
	if ( HasUnwrittenData () )
		Write ();

	m_iLastFilePos = 0;
	m_iLastFsyncPos.store ( 0, std::memory_order_relaxed );
	return m_tFile.LeakID ();
}
// raw file descriptor of the current binlog file (-1 if closed)
int BinlogWriter_c::GetFD () const noexcept
{
	return m_tFile.GetFD();
}
// name of the current binlog file (for diagnostics)
CSphString BinlogWriter_c::GetFilename () const noexcept
{
	return m_tFile.GetFilename();
}
// begin a txn frame: remember the rollback point and emit the 'TXN!' magic
void BinlogWriter_c::StartTransaction ()
{
	m_iLastTransactionStartPos = m_dBuf.GetLength ();
	PutDword ( BLOP_MAGIC_TXN_ );
}
// finish a txn frame: append CRC32 over the payload (from iStartTransaction,
// i.e. just past the magic, up to the current buffer end)
void BinlogWriter_c::EndTransaction ( int64_t iStartTransaction, bool bWriteOnOverflow )
{
	auto uCRC = sphCRC32 ( m_dBuf.Slice ( iStartTransaction ) );
	PutDword ( uCRC );

	// try to write if buffer gets too large but don't handle write errors just yet
	// also, don't remove unsuccessful transactions from the buffer
	if ( bWriteOnOverflow && m_dBuf.GetLength ()>BINLOG_WRITE_BUFFER )
		Write ( false );
}
//////////////////////////////////////////////////////////////////////////
// BinlogReader_c
//////////////////////////////////////////////////////////////////////////
// restart the running CRC from the current buffer position
void BinlogReader_c::ResetCrc ()
{
	m_uCRC = 0;
	m_iLastCrcPos = m_iBuffPos;
}
bool BinlogReader_c::CheckCrc ( const char * sOp, const char * sIndexName, int64_t iTid, int64_t iTxnPos )
{
HashCollected ();
DWORD uCRC = m_uCRC;
DWORD uRefCRC = CSphAutoreader::GetDword();
ResetCrc();
bool bPassed = ( uRefCRC==uCRC );
if ( !bPassed )
sphWarning ( "binlog: %s: CRC mismatch (table=%s, tid=" INT64_FMT ", pos=" INT64_FMT ")", sOp, sIndexName ? sIndexName : "", iTid, iTxnPos );
return bPassed;
}
// hash whatever is still pending BEFORE the base class refills the buffer,
// then restart the hash window at the new buffer position
void BinlogReader_c::UpdateCache ()
{
	HashCollected();
	CSphAutoreader::UpdateCache();
	m_iLastCrcPos = m_iBuffPos;
}
// fold the bytes read since the last hash point into the running CRC
void BinlogReader_c::HashCollected ()
{
	assert ( m_iLastCrcPos<=m_iBuffPos );
	m_uCRC = sphCRC32 ( m_pBuff + m_iLastCrcPos, m_iBuffPos - m_iLastCrcPos, m_uCRC );
	m_iLastCrcPos = m_iBuffPos;
}
//////////////////////////////////////////////////////////////////////////
// dtor just delegates to Deinit(), which is idempotent
SingleBinlog_c::~SingleBinlog_c ()
{
	Deinit ();
}
// shutdown: write the trailing per-index cache, flush+fsync, close the file,
// and drop the last log file entirely if it never received a transaction.
// idempotent: nulling m_pOwner makes subsequent calls no-ops
void SingleBinlog_c::Deinit () NO_THREAD_SAFETY_ANALYSIS
{
	if ( !m_pOwner )
		return;

	bool bLastLogEmpty = false;
	if ( !m_dLogFiles.IsEmpty () && m_tWriter.IsOpen () )
		bLastLogEmpty = m_dLogFiles.Last ().m_dIndexInfos.IsEmpty ();

	// could be already closed and meta saved on shutdown
	if ( m_tWriter.IsOpen () )
	{
		DoCacheWrite ();
		m_tWriter.WriteAndFsync ();
	}
	m_tWriter.CloseFile ();

	// should remove last binlog if no tnx was writen
	if ( bLastLogEmpty )
		RemoveLastEmptyLog ();
	m_pOwner = nullptr;
}
// periodic-flush step 1 (under the writer mutex): write out pending data when in
// ACTION_NONE mode, then report the fd that still needs an fsync (-1 if none).
// The fsync itself happens later in DoFsync(), outside the writer mutex;
// m_tFlushRunning counts such in-flight fsyncs
std::pair<int, SingleBinlog_c*> SingleBinlog_c::DoFlush ( FlushAction_e eAction )
{
	MEMORY ( MEM_BINLOG );
	ScopedBinlogMutex_t tLock ( m_tWriteAccess );
	std::pair<int, SingleBinlog_c *> tRes { -1, this };

	if ( eAction==FlushAction_e::ACTION_NONE && m_tWriter.HasUnwrittenData () )
	{
		if ( !m_tWriter.Write () )
			return tRes;
	}

	if ( !m_tWriter.HasUnsyncedData () )
		return tRes;

	tRes.first = m_tWriter.GetFD ();
	if ( tRes.first != -1 )
		m_tFlushRunning.ModifyValue ( [] ( int & iVal ) { ++iVal; } );
	return tRes;
}
// periodic-flush step 2: fsync the fd handed out by DoFlush() and
// decrement the in-flight counter (CheckDoRestart waits on it before closing fds)
void SingleBinlog_c::DoFsync ( int iSyncFd ) NO_THREAD_SAFETY_ANALYSIS
{
	if ( iSyncFd==-1 )
		return;

	m_tWriter.Fsync ( iSyncFd );
	m_tFlushRunning.ModifyValueAndNotifyAll ( [] ( int & iVal ) { --iVal; } );
}
// append the exts of all files tracked by this stream to the output vector
void SingleBinlog_c::CollectBinlogFiles ( CSphVector<int> & tOutput ) const noexcept
{
	Threads::SccRL_t tReadLock ( m_tLogFilesAccess );
	ARRAY_FOREACH ( i, m_dLogFiles )
		tOutput.Add ( m_dLogFiles[i].m_iExt );
}
// log one transaction: bump the caller's TID, frame the entry
// (magic + header + size-prefixed payload + CRC), then flush per the configured
// flavour and restart the file if it grew past the size threshold
bool SingleBinlog_c::BinlogCommit ( int64_t * pTID, const char * szIndexName, FnWriteCommit fnSaver, CSphString & sError )
{
	MEMORY ( MEM_BINLOG );
	ScopedBinlogMutex_t tLock ( m_tWriteAccess );

	int64_t iTID = ++( *pTID );
	const int uIndex = GetWriteIndexID ( szIndexName, iTID );	// may emit an ADD_INDEX entry first

	{
		// in ACTION_NONE mode the guard may also write out the buffer on overflow
		BinlogTransactionGuard_c tGuard ( m_tWriter, m_pOwner->m_eFlushFlavour==FlushAction_e::ACTION_NONE );

		// header
		m_tWriter.PutByte ( Blop_e::ADD_TXN );
		m_tWriter.ZipOffset ( uIndex );
		m_tWriter.ZipOffset ( iTID );

		TransactionSizeGuard_c tPutSize ( m_tWriter );	// back-patches the payload size
		// save txn data
		fnSaver ( m_tWriter );
	}

	// finalize
	if ( !CheckDoFlush () )
	{
		sError.SetSprintf ( "unable to write to binlog: %s", m_tWriter.GetError ().cstr () );
		return false;
	}

	CheckDoRestart ();
	return true;
}
// an index was flushed to disk up to iFlushedTID: update per-file flush watermarks,
// unlink every log file that no longer holds needed recovery data, and either open
// a fresh log (if the current one was dropped) or just re-save the meta.
// returns true when no log files remain for this stream
bool SingleBinlog_c::NotifyIndexFlush ( int64_t iFlushedTID, const char * szFlushedIndexName, Shutdown_e eShutdown, ForceSave_e eAction )
{
	MEMORY ( MEM_BINLOG );
	ScopedBinlogMutex_t tLock ( m_tWriteAccess );
	bool bCurrentLogAbandoned = false;
	int iPreflushFiles = 0;
	int iFinalFiles = 0;
	{
		Threads::SccWL_t tLogLock ( m_tLogFilesAccess );
		assert ( eShutdown || m_dLogFiles.GetLength () );
		iPreflushFiles = m_dLogFiles.GetLength ();

		// loop through all log files, and check if we can unlink any
		ARRAY_FOREACH ( iLog, m_dLogFiles )
		{
			BinlogFileDesc_t & tLog = m_dLogFiles[iLog];
			bool bUsed = false;

			// update index info for this log file
			for ( auto & tIndex: tLog.m_dIndexInfos )
			{
				// this index was just flushed, update flushed TID
				if ( tIndex.m_sName==szFlushedIndexName )
				{
					assert ( iFlushedTID>=tIndex.m_iFlushedTID );
					tIndex.m_iFlushedTID = Max ( tIndex.m_iFlushedTID, iFlushedTID );
				}

				// if max logged TID is greater than last flushed TID, log file still has needed recovery data
				if ( tIndex.m_iFlushedTID<tIndex.m_iMaxTID )
					bUsed = true;
			}

			// it's needed, keep looking
			if ( bUsed )
				continue;

			// hooray, we can remove this log!
			// if this is our current log, we have to close it first
			if ( iLog==m_dLogFiles.GetLength ()-1 )
			{
				m_tWriter.CloseFile ();
				bCurrentLogAbandoned = true;
			}

			// do unlink
			CSphString sLog = m_pOwner->MakeBinlogName ( tLog.m_iExt );
			if ( ::unlink ( sLog.cstr () ) )
				sphWarning ( "binlog: failed to unlink %s: %s", sLog.cstr (), strerrorm ( errno ) );

			// we need to reset it, otherwise there might be leftover data after last Remove()
			m_dLogFiles[iLog] = {};
			// quit tracking it
			m_dLogFiles.Remove ( iLog-- );
		}
		iFinalFiles = m_dLogFiles.GetLength();
	} // release m_tLogFilesAccess

	if ( bCurrentLogAbandoned && !eShutdown )
	{
		// if all logs were closed, we can rewind binlog file ext back to 0
		if ( !iFinalFiles )
			FixNofFiles ( iPreflushFiles );
		// if current log was closed, we need a new one (it will automatically save meta, too)
		if ( eAction!=DropTable )
			OpenNewLog ();
	} else if ( iPreflushFiles!=iFinalFiles || eAction!=NoSave )
	{
		// if we unlinked any logs, we need to save meta, too
		SaveMeta();
	}
	return iFinalFiles==0;
}
// (replay) attach an index info to the file descriptor for ext iExt, appending a
// new descriptor when the list is empty or its tail tracks a different ext.
// fix: the original's first two branches had identical bodies; merged them
void SingleBinlog_c::AdoptIndex ( int iExt, const BinlogIndexInfo_t & tIdx )
{
	if ( m_dLogFiles.IsEmpty () || m_dLogFiles.Last ().m_iExt!=iExt )
	{
		// start tracking a new file
		BinlogFileDesc_t & tLog = m_dLogFiles.Add ();
		tLog.m_iExt = iExt;
		tLog.m_dIndexInfos.Add ( tIdx );
		return;
	}

	// the tail descriptor already belongs to this ext
	BinlogFileDesc_t & tLog = m_dLogFiles.Last ();
	assert ( tLog.m_iExt==iExt );
	tLog.m_dIndexInfos.Add ( tIdx );
}
// (replay) ensure a file descriptor exists for ext iExt at the tail of the list
void SingleBinlog_c::AdoptFile ( int iExt )
{
	const bool bAlreadyTracked = !m_dLogFiles.IsEmpty () && m_dLogFiles.Last ().m_iExt==iExt;
	if ( bAlreadyTracked )
		return;

	m_dLogFiles.Add ().m_iExt = iExt;
}
// allocate (or reuse) a binlog file ext, persist the meta FIRST, then create the
// file and emit its 'SPBL' header. Reuse happens when the previous file turned out
// empty/useless during replay (eState signals that); a non-readable file is never reused
void SingleBinlog_c::OpenNewLog ( BinlogFileState_e eState )
{
	MEMORY ( MEM_BINLOG );

	// calc new ext
	int iExt = 0;
	{
		Threads::SccWL_t tLogLock ( m_tLogFilesAccess );
		if ( !m_dLogFiles.IsEmpty () )
			iExt = m_dLogFiles.Last ().m_iExt;

		if ( eState==BinlogFileState_e::OK || iExt<0 )
		{
			iExt = m_pOwner->NextBinlogExt ();
			BinlogFileDesc_t tLog;
			tLog.m_iExt = iExt;
			m_dLogFiles.Add ( std::move ( tLog ) );
		}
	}

	// update meta first then only remove binlog file
	SaveMeta ();

	// create file
	CSphString sLog = m_pOwner->MakeBinlogName ( iExt );

	if ( eState!=BinlogFileState_e::OK && eState!=BinlogFileState_e::ERROR_NON_READABLE ) // reuse the last binlog since it is empty or useless.
		::unlink ( sLog.cstr () );

	CSphString sError;
	if ( !m_tWriter.OpenFile ( sLog, sError ) )
		sphDie ( "failed to create %s: %s errno=%d, error=%s", sLog.cstr (), sError.cstr (), errno, strerrorm ( errno ) );

	// emit header
	m_tWriter.PutDword ( BINLOG_HEADER_MAGIC_SPBL );
	m_tWriter.PutDword ( BINLOG_VERSION );
}
// highest flushed TID recorded for the given index across all tracked log files
int64_t SingleBinlog_c::LastTidFor ( const CSphString & sIndex ) const noexcept
{
	Threads::SccRL_t tReadLock ( m_tLogFilesAccess );
	int64_t iMaxFlushed = 0;
	if ( m_dLogFiles.IsEmpty () )
		return iMaxFlushed;

	for ( const auto & tLog : m_dLogFiles )
		for ( const auto & tIndex : tLog.m_dIndexInfos )
			if ( tIndex.m_sName==sIndex )
				iMaxFlushed = Max ( iMaxFlushed, tIndex.m_iFlushedTID );

	return iMaxFlushed;
}
// manually acquire the writer mutex (used while a freshly created stream opens its first log)
void SingleBinlog_c::LockWriter ()
{
	m_tWriteAccess.Lock();
}
// release the writer mutex acquired via LockWriter()
void SingleBinlog_c::UnlockWriter ()
{
	m_tWriteAccess.Unlock();
}
// unlink and forget the tail log file; only valid when it logged no index data
// (checked by the assert). Called from Deinit() on shutdown
void SingleBinlog_c::RemoveLastEmptyLog ()
{
	{
		Threads::SccWL_t tLock { m_tLogFilesAccess };
		assert ( !m_dLogFiles.IsEmpty () && m_dLogFiles.Last ().m_dIndexInfos.IsEmpty () );

		// do unlink
		CSphString sLog = m_pOwner->MakeBinlogName ( m_dLogFiles.Last ().m_iExt );
		if ( ::unlink ( sLog.cstr () ) )
			sphWarning ( "binlog: failed to unlink abandoned %s: %s", sLog.cstr (), strerrorm ( errno ) );

		// quit tracking it
		m_dLogFiles.Pop ();
	}
	// if we unlinked any logs, we need to save meta, too
	SaveMeta ();
}
// map an index name to its in-file id within the current (tail) log file,
// registering it (and emitting an ADD_INDEX entry) on first use; also bumps the
// index's max logged TID. Mutation under a read lock is safe here because all
// writers are serialized by m_tWriteAccess (REQUIRES in the declaration)
int SingleBinlog_c::GetWriteIndexID ( const char * szIndexName, int64_t iTID )
{
	MEMORY ( MEM_BINLOG );
	Threads::SccRL_t tLogLock { m_tLogFilesAccess };
	assert ( !m_dLogFiles.IsEmpty () );

	// OPTIMIZE? maybe hash them?
	BinlogFileDesc_t & tLog = m_dLogFiles.Last ();
	ARRAY_FOREACH ( i, tLog.m_dIndexInfos )
	{
		BinlogIndexInfo_t & tIndex = tLog.m_dIndexInfos[i];
		if ( tIndex.m_sName==szIndexName )
		{
			tIndex.m_iMaxTID = Max ( tIndex.m_iMaxTID, iTID );
			return i;
		}
	}

	// create a new entry
	int iID = tLog.m_dIndexInfos.GetLength ();
	BinlogIndexInfo_t & tIndex = tLog.m_dIndexInfos.Add (); // caller must hold a wlock
	tIndex.m_iMinTID = iTID;
	tIndex.m_iMaxTID = iTID;
	tIndex.m_sName = szIndexName;
	tIndex.m_iFlushedTID = 0;

	// log this new entry
	BinlogTransactionGuard_c tGuard ( m_tWriter, false );

	m_tWriter.PutByte ( ADD_INDEX );
	m_tWriter.ZipOffset ( iID );
	m_tWriter.PutZString ( szIndexName );

	// return the index
	return iID;
}
// cache is a small summary of affected indexes, it is written at the very end of binlog file when it exceeded size limit,
// before opening new file.
// write the ADD_CACHE summary (name + min/max/flushed TID per index of the tail
// file) and flush it; lets replay skip files whose data is already flushed
void SingleBinlog_c::DoCacheWrite ()
{
	Threads::SccRL_t tLogLock { m_tLogFilesAccess };
	if ( m_dLogFiles.IsEmpty () )
		return;
	assert ( m_tWriter.IsOpen () );
	const CSphVector<BinlogIndexInfo_t> & dIndexes = m_dLogFiles.Last ().m_dIndexInfos;

	BinlogTransactionGuard_c tGuard ( m_tWriter );

	m_tWriter.PutByte ( ADD_CACHE );
	m_tWriter.ZipOffset ( dIndexes.GetLength () );
	for ( const auto & tIndex: dIndexes )
	{
		m_tWriter.PutZString ( tIndex.m_sName.cstr () );
		m_tWriter.ZipOffset ( tIndex.m_iMinTID );
		m_tWriter.ZipOffset ( tIndex.m_iMaxTID );
		m_tWriter.ZipOffset ( tIndex.m_iFlushedTID );
	}
	m_tWriter.Write ();
}
// rotate to a new log file once the current one exceeds the size threshold:
// write the cache summary, detach the old fd, open the new log, then close the
// old fd in a coroutine that first waits for in-flight background fsyncs
// (and fsyncs itself in ACTION_FSYNC_ON_CLOSE mode)
void SingleBinlog_c::CheckDoRestart ()
{
	// restart on exceed file size limit
	if ( !m_pOwner->m_iRestartSize || m_tWriter.GetFilePos ()<=m_pOwner->m_iRestartSize )
		return;

	MEMORY ( MEM_BINLOG );
#ifndef NDEBUG
	{ Threads::SccRL_t tLock { m_tLogFilesAccess };
	assert ( !m_dLogFiles.IsEmpty () ); }
#endif

	DoCacheWrite ();
	auto sName = m_tWriter.GetFilename();
	int iFD = m_tWriter.LeakFD();
	bool bFsyncOnClose = m_pOwner->m_eFlushFlavour==FlushAction_e::ACTION_FSYNC_ON_CLOSE;
	OpenNewLog ();
	Threads::Coro::Go ( [this, iFD, bFsyncOnClose, sName] {
		// wait until no background fsync is using any of our fds
		m_tFlushRunning.Wait ( [] ( int iVal ) { return iVal<1; } );
		if ( bFsyncOnClose && fsync ( iFD )!=0 )
			sphWarning ( "failed to sync %s: %d (%s)", sName.cstr(), errno, strerrorm ( errno ) );
		::close ( iFD );
	}, Threads::Coro::CurrentScheduler () );
}
// Push pending writer data to disk according to the configured flush flavour.
// Returns false when the underlying write (or write+fsync) failed.
bool SingleBinlog_c::CheckDoFlush ()
{
	const auto eFlavour = m_pOwner->m_eFlushFlavour;

	if ( eFlavour==FlushAction_e::ACTION_NONE )
		return true;

	// fsync-every-commit mode flushes and syncs in one call
	if ( eFlavour==FlushAction_e::ACTION_FSYNC )
		return m_tWriter.WriteAndFsync ();

	// plain write now; ACTION_FSYNC_ON_CLOSE defers the sync to file rotation
	if ( eFlavour==FlushAction_e::ACTION_WRITE || eFlavour==FlushAction_e::ACTION_FSYNC_ON_CLOSE )
		return m_tWriter.Write ();

	assert( false && "wrong binlog flush action flag" );
	return true;
}
// Forward meta persistence to the owning Binlog_c, which knows the full file set.
void SingleBinlog_c::SaveMeta ()
{
	m_pOwner->SaveMeta ();
}
// Forward live-file-count bookkeeping to the owning Binlog_c.
void SingleBinlog_c::FixNofFiles ( int iFiles )
{
	m_pOwner->FixNofFiles (iFiles);
}
//////////////////////////////////////////////////////////////////////////

// Find (or lazily create) the per-index binlog for szIndexName.
// Double-checked pattern: optimistic lookup under the r-lock first,
// then re-check and insert under the w-lock if still missing.
SingleBinlog_c* Binlog_c::GetWriteIndexBinlog ( const char* szIndexName, bool bOpenNewLog ) NO_THREAD_SAFETY_ANALYSIS
{
	if ( m_bCommonBinlog )
		return GetSingleWriteIndexBinlog ( bOpenNewLog );

	SingleBinlogPtr * pVal;
	{
		Threads::SccRL_t tLock ( m_tHashAccess );
		pVal = m_hBinlogs ( szIndexName );
	}
	if ( pVal )
		return pVal->get();

	bool bLocked = false;
	{
		Threads::SccWL_t tLock ( m_tHashAccess );
		pVal = m_hBinlogs ( szIndexName );
		if ( pVal ) // the value arrived while we acquired w-lock
			return pVal->get ();
		m_hBinlogs.Add ( std::make_unique<SingleBinlog_c> (this), szIndexName );
		pVal = m_hBinlogs ( szIndexName );
		if ( bOpenNewLog )
		{
			// take the writer lock while still under the hash w-lock -
			// presumably so the fresh binlog can't be used before its
			// log file is opened below; TODO confirm
			pVal->get ()->LockWriter ();
			bLocked = true;
		}
	}
	assert ( pVal );
	// OpenNewLog invokes SaveMeta, which excludes m_tHashAccess
	if ( bOpenNewLog )
		pVal->get ()->OpenNewLog ();
	if ( bLocked )
		pVal->get ()->UnlockWriter ();
	return pVal->get ();
}
// Common-binlog flavour of GetWriteIndexBinlog: all indexes share one
// SingleBinlog_c keyed as "common". Same double-checked create pattern.
SingleBinlog_c * Binlog_c::GetSingleWriteIndexBinlog ( bool bOpenNewLog ) NO_THREAD_SAFETY_ANALYSIS
{
	assert ( m_bCommonBinlog );
	SingleBinlogPtr * pVal = nullptr;
	{
		Threads::SccRL_t tLock ( m_tHashAccess );
		auto Iter = m_hBinlogs.begin ();
		if ( Iter != m_hBinlogs.end() )
			pVal = &Iter->second;
	}
	if ( pVal )
		return pVal->get ();

	bool bLocked = false;
	{
		Threads::SccWL_t tLock ( m_tHashAccess );
		auto Iter = m_hBinlogs.begin ();
		if ( Iter!=m_hBinlogs.end () )
			pVal = &Iter->second;
		if ( pVal ) // the value arrived while we acquired w-lock
			return pVal->get ();
		m_hBinlogs.Add ( std::make_unique<SingleBinlog_c> ( this ), "common" );
		pVal = &m_hBinlogs.begin ()->second;
		if ( bOpenNewLog )
		{
			// hold the writer lock across the hash unlock, released after OpenNewLog below
			pVal->get()->LockWriter();
			bLocked = true;
		}
	}
	assert ( pVal );
	// OpenNewLog invokes SaveMeta, which excludes m_tHashAccess
	if ( bOpenNewLog )
		pVal->get ()->OpenNewLog ();
	if ( bLocked )
		pVal->get ()->UnlockWriter();
	return pVal->get ();
}
// Look up an existing binlog for flushing; never creates one.
// In common mode the single (first) entry serves every table.
// Caller must already hold m_tHashAccess.
SingleBinlog_c * Binlog_c::GetFlushIndexBinlog ( const char * szIndexName ) REQUIRES ( m_tHashAccess )
{
	if ( !m_bCommonBinlog )
	{
		SingleBinlogPtr * pEntry = m_hBinlogs ( szIndexName );
		return pEntry ? pEntry->get () : nullptr;
	}

	auto tIt = m_hBinlogs.begin ();
	if ( tIt==m_hBinlogs.end () )
		return nullptr;
	return tIt->second.get ();
}
// Shut down every per-index binlog and release the binlog directory lock.
// When binlogging is disabled nothing was initialized, so nothing to do.
Binlog_c::~Binlog_c ()
{
	if ( m_bDisabled )
		return;

	for ( auto & tBinlog: m_hBinlogs )
		tBinlog.second->Deinit();
	UnlockBinlog ();
}
// Build the printf template "<path>/binlog.%0<digits>d" used to render file names.
// Note the escaped "%%0%dd": the digit count is substituted here, the extension later.
void Binlog_c::MakeBinlogFilenameTemplate() noexcept
{
	m_sBinlogFileNameTemplate = SphSprintf ( "%s/binlog.%%0%dd", m_sLogPath.cstr (), m_iBinlogFileDigits );
}
// Render the full path of the binlog file with the given sequence number.
CSphString Binlog_c::MakeBinlogName ( int iExt ) const noexcept
{
	assert ( !m_sBinlogFileNameTemplate.IsEmpty() );
	return SphSprintf ( m_sBinlogFileNameTemplate.scstr(), iExt );
}
// Delete the binlog file with the given sequence number from disk.
void Binlog_c::RemoveFile ( int iExt ) NO_THREAD_SAFETY_ANALYSIS
{
	// the temporary CSphString lives until the end of the full expression
	::unlink ( MakeBinlogName ( iExt ).cstr () );
}
// Notify the appropriate binlog that an index was flushed up to iFlushedTID.
// In per-index mode a DropTable action may leave the binlog abandoned; it is
// then removed from the hash and the meta file is rewritten.
void Binlog_c::NotifyIndexFlush ( int64_t iFlushedTID, const char* szFlushedIndexName, Shutdown_e eShutdown, ForceSave_e eAction )
{
	if ( m_bReplayMode )
		sphInfo ( "table '%s': ramchunk saved. TID=" INT64_FMT, szFlushedIndexName, iFlushedTID );

	if ( !IsBinlogWritable () )
		return;

	if ( m_bCommonBinlog || eAction!=DropTable )
	{
		// a common binlog can't drop per-index state; degrade DropTable to a forced save
		if ( eAction == DropTable )
			eAction = ForceSave;
		auto pSingleBinlog = GetWriteIndexBinlog ( szFlushedIndexName );
		pSingleBinlog->NotifyIndexFlush ( iFlushedTID, szFlushedIndexName, eShutdown, eAction );
		return;
	}

	Threads::SccWL_t tLock ( m_tHashAccess );
	auto pSingleBinlog = GetFlushIndexBinlog ( szFlushedIndexName );
	if ( !pSingleBinlog )
		return;

	bool bAbandoned = pSingleBinlog->NotifyIndexFlush ( iFlushedTID, szFlushedIndexName, eShutdown, eAction );
	if ( !bAbandoned )
		return;

	// binlog reported itself abandoned: drop it and persist the new file set
	m_hBinlogs.Delete ( szFlushedIndexName );
	SaveMetaUnlock();
}
// Atomically hand out the next binlog file sequence number.
int Binlog_c::NextBinlogExt ()
{
	return m_iNextBinlog.fetch_add ( 1, std::memory_order_relaxed );
}
// Decrement the live binlog file counter by iFiles; when the count reaches
// zero, reset file numbering so the next log starts from extension 0.
void Binlog_c::FixNofFiles ( int iFiles )
{
	// FIX: decide "reached zero" from the value fetch_sub returns instead of
	// re-reading the atomic afterwards; the original sub-then-reload pair was
	// not atomic, so a concurrent FixNofFiles could make both (or neither)
	// caller observe zero.
	if ( m_iNumFiles.fetch_sub ( iFiles )==iFiles )
		m_iNextBinlog.store ( 0, std::memory_order_release );
}
// run once on startup
// Store the binlog directory; an empty path disables binlogging entirely.
// The lock/unlock pair is a probe that the path exists and is writable
// (failure dies inside LockBinlog before the daemon detaches).
void Binlog_c::CheckAndSetPath ( CSphString sBinlogPath )
{
	m_sLogPath = std::move ( sBinlogPath );
	MakeBinlogFilenameTemplate();
	m_bDisabled = m_sLogPath.IsEmpty ();

	if ( m_bDisabled )
		return;

	// pair lock/unlock ensures binlog path is available and writable
	LockBinlog ();
	UnlockBinlog ();
}
// run once on startup
// Read binlog settings from the config, then lock the binlog directory and
// load the meta. Note: the lock taken here is NOT released in this function -
// it is held until the destructor calls UnlockBinlog.
void Binlog_c::Configure ( const CSphConfigSection & hSearchd, DWORD uReplayFlags )
{
	MEMORY ( MEM_BINLOG );

	const int iMode = hSearchd.GetInt ( "binlog_flush", 2 );
	switch ( iMode )
	{
		case 0: m_eFlushFlavour = FlushAction_e::ACTION_NONE; break;
		case 1: m_eFlushFlavour = FlushAction_e::ACTION_FSYNC; break;
		case 2: m_eFlushFlavour = FlushAction_e::ACTION_WRITE; break;
		case 3: m_eFlushFlavour = FlushAction_e::ACTION_FSYNC_ON_CLOSE; break;
		default:	sphDie ( "unknown binlog flush mode %d (must be 0, 1, 2, or 3)\n", iMode );
	}

	m_iRestartSize = hSearchd.GetSize ( "binlog_max_log_size", m_iRestartSize );
	m_uReplayFlags = uReplayFlags;
	m_iBinlogFileDigits = hSearchd.GetInt ( "binlog_filename_digits", 4 );

	if ( m_bDisabled )
		return;

	LockBinlog ();
	LoadMeta();
	// rebuild the filename template: LoadMeta may have changed m_iBinlogFileDigits
	MakeBinlogFilenameTemplate ();
}
// Switch between common (single shared file) and per-index binlog mode.
void Binlog_c::SetCommon ( bool bCommonBinlog )
{
	if ( m_bCommonBinlog!=bCommonBinlog )
	{
		m_bCommonBinlog = bCommonBinlog;
		// fixme! add cleanup to avoid 'mixed' binlog
	}
}
// Periodic background flushing is pointless when binlogging is off,
// or when every commit already fsyncs (ACTION_FSYNC).
bool Binlog_c::IsFlushingEnabled () const
{
	if ( m_bDisabled )
		return false;
	return m_eFlushFlavour!=FlushAction_e::ACTION_FSYNC;
}
// executed externally by task binlog flush, every BINLOG_AUTO_FLUSH (1 sec)
// Two-phase flush: collect write results under the hash r-lock,
// then perform the (potentially slow) fsyncs outside of it.
void Binlog_c::DoFlush ()
{
	assert ( !m_bDisabled );
	MEMORY ( MEM_BINLOG );

	if ( sphInterrupted () )
		return;

	m_iLastFlushed.store ( sphMicroTimer (), std::memory_order_relaxed );

	CSphVector<std::pair<int, SingleBinlog_c *>> dToFsync;
	{
		Threads::SccRL_t tLock ( m_tHashAccess );
		for ( auto& tBinlog : m_hBinlogs )
			dToFsync.Add ( tBinlog.second->DoFlush ( m_eFlushFlavour ) );
	}

	for ( auto& tSync : dToFsync)
	{
		if ( sphInterrupted () )
			return;

		tSync.second->DoFsync ( tSync.first );
	}
}
// Timestamp (microseconds) when the next periodic flush is due:
// one period after the last flush, or after "now" if nothing flushed yet.
int64_t Binlog_c::NextFlushingTime () const noexcept
{
	const auto iLast = m_iLastFlushed.load ( std::memory_order_relaxed );
	const auto iBase = iLast ? iLast : sphMicroTimer ();
	return iBase + m_iFlushPeriod;
}
int64_t Binlog_c::LastTidFor ( const CSphString & sIndex ) const noexcept
{
int64_t iTID = 0;
Threads::SccRL_t tLock ( m_tHashAccess );
for ( auto & tBinlog: m_hBinlogs )
iTID = Max ( iTID, tBinlog.second->LastTidFor ( sIndex ) );
return iTID;
}
// retry parameters for the meta rename dance below
static constexpr int SAVE_TRIES = 4;
static constexpr int SAVE_TRIE_DELAY = 50;

// Persist the list of live binlog files into binlog.meta.
// The payload is serialized to memory first, then written to binlog.meta.new
// and atomically renamed over binlog.meta, with retries on failure.
// Skipped entirely when the file set is unchanged (unless eForce says otherwise).
void Binlog_c::DoSaveMeta ( IntVec_t dFiles, SaveMeta_e eForce )
{
	if ( eForce==eNoForce && CompareCurrentFiles ( dFiles ) )
		return; // files are same as stored; no need to rewrite meta

	CSphVector<BYTE> dMeta;
	MemoryWriter2_c wrMeta ( dMeta );
	wrMeta.PutDword ( BINLOG_META_MAGIC_SPLI );
	wrMeta.PutDword ( BINLOG_VERSION );
	wrMeta.PutByte ( m_iBinlogFileDigits );

//	StringBuilder_c sMetaLog;
//	sMetaLog << "SaveMeta: " << m_bReplayMode << " " << dFiles.GetLength () << ": ";
//	sMetaLog.StartBlock ();

	m_iNumFiles.store ( dFiles.GetLength (), std::memory_order_relaxed );
	wrMeta.ZipInt ( dFiles.GetLength () );
	for ( const auto & iExt: dFiles )
	{
		wrMeta.ZipInt ( iExt ); // everything else is saved in logs themselves
//		sMetaLog << iExt;
	}

//	sphWarning ( "%s", sMetaLog.cstr() );

	// re-check under the w-lock: another thread may have stored the same set meanwhile
	Threads::SccWL_t rLock { m_tCurrentFilesAccess };
	if ( eForce==eNoForce && m_dCurrentFiles==dFiles )
		return;

	// no files left - restart numbering from scratch
	if ( dFiles.IsEmpty() )
		m_iNextBinlog.store ( 0, std::memory_order_release );

	m_dCurrentFiles.SwapData ( dFiles );

	auto sMetaNew = SphSprintf ( "%s/binlog.meta.new", m_sLogPath.cstr () );
	auto sMeta = SphSprintf ( "%s/binlog.meta", m_sLogPath.cstr () );

	// write-then-rename loop; each try rewrites binlog.meta.new from scratch
	for ( int i=0; i<SAVE_TRIES; ++i )
	{
		CSphString sError;
		CSphWriterNonThrottled wrMetaFile;
		::unlink ( sMetaNew.cstr() );
		if ( !wrMetaFile.OpenFile ( sMetaNew, sError ) )
			sphDie ( "failed to open '%s': '%s'", sMetaNew.cstr(), sError.cstr() );
		wrMetaFile.PutBytes ( dMeta.begin(), dMeta.GetLength() );
		wrMetaFile.CloseFile();
		if ( wrMetaFile.IsError() )
		{
			sphWarning ( "Error when closing file %s, errno=%d, error=%s, try=%d", sError.cstr(), errno, strerrorm ( errno ), i+1 );
			continue;
		}

		// rename with one quick in-place retry; give up (and die on the last try) otherwise
		if ( sph::rename ( sMetaNew.cstr(), sMeta.cstr() ) )
		{
			sphSleepMsec ( SAVE_TRIE_DELAY );
			if ( sph::rename ( sMetaNew.cstr (), sMeta.cstr () ) )
			{
				if ( i<SAVE_TRIES-1 )
				{
					sphWarning ( "failed to rename meta (src=%s, dst=%s, errno=%d, error=%s), try %d",
							 sMetaNew.cstr (), sMeta.cstr (), errno, strerrorm ( errno ),
							 i+1 ); // !COMMIT handle this gracefully
					sphSleepMsec ( SAVE_TRIE_DELAY );
				} else
					sphDie ( "failed to rename meta (src=%s, dst=%s, errno=%d, error=%s)",
						 sMetaNew.cstr (), sMeta.cstr (), errno,
						 strerrorm ( errno ) ); // !COMMIT handle this gracefully
			} else break;
		} else break;
	}

	sphLogDebug ( "Binlog::SaveMeta: Done (%s)", sMetaNew.cstr() );
}
// Save the meta after collecting the current file set (takes the hash r-lock).
void Binlog_c::SaveMeta ()
{
	MEMORY ( MEM_BINLOG );
	DoSaveMeta ( CollectBinlogFiles (), eNoForce );
}
// Save the meta for callers that already hold m_tHashAccess.
void Binlog_c::SaveMetaUnlock ( SaveMeta_e eForce )
{
	MEMORY ( MEM_BINLOG );
	DoSaveMeta ( CollectBinlogFilesUnlock (), eForce );
}
// Locked wrapper: gather the live file extensions under the hash r-lock.
IntVec_t Binlog_c::CollectBinlogFiles ()
{
	Threads::SccRL_t rLock { m_tHashAccess };
	return CollectBinlogFilesUnlock();
}
// Gather the extensions of all live binlog files: per-binlog files first,
// then the not-yet-replayed saved files in reverse (newest-first) order.
// Caller is responsible for holding m_tHashAccess.
IntVec_t Binlog_c::CollectBinlogFilesUnlock ()
{
	IntVec_t dResult;
	for ( auto & tEntry: m_hBinlogs )
		tEntry.second->CollectBinlogFiles ( dResult );

	for ( int iLeft = m_dSavedFiles.GetLength (); iLeft>0; --iLeft )
		dResult.Add ( m_dSavedFiles[iLeft-1] );
	return dResult;
}
// True when dFiles matches the file set that was last persisted into meta.
bool Binlog_c::CompareCurrentFiles ( const IntVec_t & dFiles )
{
	Threads::SccRL_t rLock { m_tCurrentFilesAccess };
	return m_dCurrentFiles==dFiles;
}
// Take the exclusive binlog.lock file; dies on failure (path missing/unwritable
// or another daemon instance already owns the directory).
void Binlog_c::LockBinlog ()
{
	assert ( m_iLockFD==-1 );
	CSphString sError;

	if ( !RawFileLock ( SphSprintf ( "%s/binlog.lock", m_sLogPath.cstr () ), m_iLockFD, sError ) )
		sphDie ( "%s", sError.scstr () );
}
// Release the binlog.lock file taken by LockBinlog.
void Binlog_c::UnlockBinlog ()
{
	RawFileUnLock ( SphSprintf ( "%s/binlog.lock", m_sLogPath.cstr () ), m_iLockFD );
}
// Writes are allowed only when binlogging is enabled and not replaying.
bool Binlog_c::IsBinlogWritable () const noexcept
{
	return !m_bReplayMode && !m_bDisabled;
}
// commit stuff. Indexes call this function with serialization cb; binlog is agnostic to alien data structures.
bool Binlog_c::BinlogCommit ( int64_t * pTID, const char* szIndexName, FnWriteCommit fnSaver, CSphString & sError )
{
	if ( !IsBinlogWritable () ) // m.b. need to advance TID as index flush according to it
		return true;

	auto pSingleBinlog = GetWriteIndexBinlog ( szIndexName );
	return pSingleBinlog->BinlogCommit ( pTID, szIndexName, std::move ( fnSaver ), sError );
}
// called once on startup from Configure()
// Parse binlog.meta: magic, version, filename-digit count, and the list of
// saved (to-be-replayed) binlog file extensions. Dies on unreadable or
// incompatible metas that actually carry recovery data.
void Binlog_c::LoadMeta ()
{
	MEMORY ( MEM_BINLOG );

	auto sMeta = SphSprintf ( "%s/binlog.meta", m_sLogPath.cstr () );
	if ( !sphIsReadable ( sMeta.cstr () ) )
		return;

	CSphString sError;

	// opened and locked, lets read
	CSphAutoreader rdMeta;
	if ( !rdMeta.Open ( sMeta, sError ) )
		sphDie ( "%s error: %s", sMeta.cstr (), sError.cstr () );

	if ( rdMeta.GetDword ()!=BINLOG_META_MAGIC_SPLI )
		sphDie ( "invalid meta file %s", sMeta.cstr () );

	// binlog meta v1 was dev only, crippled, and we don't like it anymore
	// binlog metas v2 upto current v4 (and likely up) share the same simplistic format
	// so let's support empty (!) binlogs w/ known versions and compatible metas
	DWORD uVersion = rdMeta.GetDword ();
	if ( uVersion==1 || uVersion>BINLOG_VERSION )
		sphDie ( "binlog meta file %s is v.%d, binary is v.%d; recovery requires previous binary version",
				 sMeta.cstr (), uVersion, BINLOG_VERSION );

	auto uByte = rdMeta.GetByte(); // id64 for v<15; num of digits in names for v>=15

	m_dSavedFiles.Resize ( rdMeta.UnzipInt () ); // FIXME! sanity check
	if ( m_dSavedFiles.IsEmpty () )
		return;

	// ok, so there is actual recovery data
	// could be wrong version of the empty binlog
	m_bWrongVersion = ( uVersion!=BINLOG_VERSION );

	// let's require that bitness
	if ( uVersion < 15 )
	{
		if ( uByte != 1 )
			sphDie ( "tables with 32-bit docids are no longer supported; recovery requires previous binary version" );
	} else
		m_iBinlogFileDigits = uByte;

	assert ( m_iBinlogFileDigits>0 );

	// load list of active log files
	// note: stored oldest-last, read into the vector back-to-front
	int iMaxExt = 0;
	for ( int i=m_dSavedFiles.GetLength ()-1; i>=0; --i )
	{
		auto iExt = rdMeta.UnzipInt (); // everything else is saved in logs themselves
		if ( iExt>iMaxExt )
			iMaxExt = iExt;
		m_dSavedFiles[i] = iExt;
	}
	// continue numbering after the highest extension seen
	m_iNextBinlog.store ( iMaxExt+1, std::memory_order_release );
}
// primary call - invoked from daemon once on start
// Replay every saved binlog file (newest-last order off m_dSavedFiles),
// handing replayed per-index state over to freshly created per-index binlogs.
void Binlog_c::Replay ( const SmallStringHash_T<CSphIndex *> & hIndexes, ProgressCallbackSimple_t * pfnProgressCallback ) NO_THREAD_SAFETY_ANALYSIS
{
	if ( m_bDisabled )
		return;

	// on replay started
	if ( pfnProgressCallback )
		pfnProgressCallback ();

	int64_t tmReplay = sphMicroTimer ();
	// do replay
	m_bReplayMode = true;

	BinlogFileState_e ePrevLogState = BinlogFileState_e::OK, eLastLogState = BinlogFileState_e::OK;
	SingleBinlog_c* pPrevInfo = nullptr, *pInfo = nullptr;

	// FIX: the loop below drains m_dSavedFiles, so the original post-loop
	// "GetLength()>0" check was always false and the summary never printed;
	// remember the starting count up front.
	const int iLogsReplayed = m_dSavedFiles.GetLength ();

	while ( !m_dSavedFiles.IsEmpty() )
	{
		auto iExt = m_dSavedFiles.Last();
		BinlogReplayFileDesc_t tLog;
		tLog.m_iExt = iExt;
		ePrevLogState = std::exchange ( eLastLogState, ReplayBinlog ( tLog, hIndexes ) );
		if ( pfnProgressCallback ) // on each replayed binlog
			pfnProgressCallback ();
		m_dSavedFiles.Pop();

		// a log that touched no indexes is adopted by the last per-index binlog, or just deleted
		if ( tLog.m_dIndexInfos.IsEmpty() )
		{
			if ( pInfo )
				pInfo->AdoptFile ( iExt );
			else
				RemoveFile ( iExt );
		}

		// distribute the replayed per-index infos to their per-index binlogs
		for ( const auto& tInfo : tLog.m_dIndexInfos )
		{
			pInfo = GetWriteIndexBinlog ( tInfo.m_sName.cstr (), false );
			pInfo->AdoptIndex ( tLog.m_iExt, tInfo );
			if ( pInfo!=pPrevInfo )
			{
				if ( pPrevInfo )
					pPrevInfo->OpenNewLog ( ePrevLogState );
				pPrevInfo = pInfo;
			}
		}
	}

	if ( !pInfo && !pPrevInfo )
		SaveMetaUnlock ( eForce );
	if ( pPrevInfo )
		pPrevInfo->OpenNewLog ( eLastLogState );

	if ( iLogsReplayed>0 )
	{
		tmReplay = sphMicroTimer ()-tmReplay;
		sphInfo ( "binlog: finished replaying total %d in %d.%03d sec",
			iLogsReplayed,
			(int) ( tmReplay/1000000 ), (int) ( ( tmReplay/1000 )%1000 ) );
	}

	// FIXME?
	// in some cases, indexes might had been flushed during replay
	// and we might therefore want to update m_iFlushedTID everywhere
	// but for now, let's just wait until next flush for simplicity

	// resume normal operation
	m_bReplayMode = false;
}
// Replay a single binlog file: validate header magic/version, then walk the
// stream of BLOP_MAGIC_TXN_-framed records (ADD_INDEX / ADD_CACHE / ADD_TXN)
// until EOF or the first error. Fills tLog.m_dIndexInfos with the per-index
// replay state and returns a classification of the file.
BinlogFileState_e Binlog_c::ReplayBinlog ( BinlogReplayFileDesc_t & tLog, const SmallStringHash_T<CSphIndex*> & hIndexes ) NO_THREAD_SAFETY_ANALYSIS
{
	CSphString sError;
	const CSphString sLog ( MakeBinlogName ( tLog.m_iExt ) );

	// open, check, play
	sphInfo ( "binlog: replaying log %s", sLog.cstr() );

	BinlogReader_c tReader;
	if ( !tReader.Open ( sLog, sError ) )
	{
		Log ( REPLAY_IGNORE_OPEN_ERROR, "binlog: log open error: %s", sError.cstr() );
		return BinlogFileState_e::ERROR_NON_READABLE;
	}

	const SphOffset_t iFileSize = tReader.GetFilesize();

	if ( !iFileSize )
	{
		sphWarning ( "binlog: empty binlog %s detected, skipping", sLog.cstr() );
		return BinlogFileState_e::ERROR_EMPTY_0;
	}

	if ( tReader.GetDword()!=BINLOG_HEADER_MAGIC_SPBL )
	{
		Log ( REPLAY_IGNORE_TRX_ERROR, "binlog: log %s missing magic header (corrupted?)", sLog.cstr() );
		return BinlogFileState_e::ERROR_WRONG_FILE;
	}

	DWORD uVersion = tReader.GetDword();
	if ( tReader.GetErrorFlag() )
		sphWarning ( "binlog: log io error at pos=" INT64_FMT ": %s", tReader.GetPos(), sError.cstr() );

	// could replay empty binlog of the old version
	m_bWrongVersion = ( uVersion!=BINLOG_VERSION );

	if ( iFileSize==8 ) // couple of DWORDs we just read - header and version
	{
		sphWarning ( "binlog: empty binlog %s detected, skipping", sLog.cstr () );
		return BinlogFileState_e::ERROR_EMPTY_8;
	}

	/////////////
	// do replay
	/////////////

	// per-op counters; slot TOTAL counts all records seen
	std::array<int, TOTAL+1> dTotal {0};

	// !COMMIT
	// instead of simply replaying everything, we should check whether this binlog is clean
	// by loading and checking the cache stored at its very end
	tLog.m_dIndexInfos.Reset();

	bool bReplayOK = true;
	bool bHaveCacheOp = false;
	int64_t iPos = -1;

	int64_t tmReplay = sphMicroTimer();

	while ( iFileSize!=tReader.GetPos() && !tReader.GetErrorFlag() && bReplayOK )
	{
		iPos = tReader.GetPos();
		if ( tReader.GetDword()!=BLOP_MAGIC_TXN_ )
		{
			Log ( REPLAY_IGNORE_TRX_ERROR, "binlog: log missing txn marker at pos=" INT64_FMT " (corrupted?)", iPos );
			bReplayOK = false;
			break;
		}

		// CRC is accumulated per record, starting right after the marker
		tReader.ResetCrc ();
		const auto uOp = (Blop_e) tReader.GetByte ();
		if ( uOp<=0 || uOp>=TOTAL )
		{
			Log ( REPLAY_IGNORE_TRX_ERROR, "binlog: unexpected entry (blop=" UINT64_FMT ", pos=" INT64_FMT ")", uOp, iPos );
			bReplayOK = false;
			break;
		}

		// version mismatch is tolerated only for the trailing ADD_CACHE of an empty log
		if ( m_bWrongVersion && uOp!=ADD_CACHE )
		{
			Log ( REPLAY_IGNORE_TRX_ERROR, "binlog: log %s is v.%d, binary is v.%d; recovery requires previous binary version", sLog.cstr(), uVersion, BINLOG_VERSION );
			bReplayOK = false;
			break;
		}

		// FIXME! blop might be OK but skipped (eg. index that is no longer)
		switch ( uOp )
		{
			case ADD_INDEX:
				bReplayOK = ReplayIndexAdd ( tLog, hIndexes, tReader );
				break;

			case ADD_CACHE:
				if ( bHaveCacheOp )
				{
					Log ( REPLAY_IGNORE_TRX_ERROR, "binlog: internal error, second BLOP_ADD_CACHE detected (corruption?)" );
					bReplayOK = false;
					break;
				}
				bHaveCacheOp = true;
				bReplayOK = ReplayCacheAdd ( tLog, uVersion, tReader );
				break;

			case ADD_TXN:
				bReplayOK = ReplayTxn ( tLog, tReader );
				break;

			default:
				Log ( REPLAY_IGNORE_TRX_ERROR, "binlog: internal error, unhandled entry (blop=%d)", (int)uOp );
				bReplayOK = false;
				break;
		}

		dTotal [ uOp ] += bReplayOK ? 1 : 0;
		++dTotal [ TOTAL ];
	}

	tmReplay = sphMicroTimer() - tmReplay;

	if ( tReader.GetErrorFlag() )
		sphWarning ( "binlog: log io error at pos=" INT64_FMT ": %s", iPos, sError.cstr() );

	if ( !bReplayOK )
		sphWarning ( "binlog: replay error at pos=" INT64_FMT , iPos );

	// show additional replay statistics
	for ( const auto& tIndex : tLog.m_dIndexInfos )
	{
		if ( tIndex.m_iPreReplayTID < tIndex.m_iMaxTID )
		{
			sphInfo ( "binlog: table %s: recovered from tid " INT64_FMT " to tid " INT64_FMT,
				tIndex.m_sName.cstr(), tIndex.m_iPreReplayTID, tIndex.m_iMaxTID );
		} else
		{
			sphInfo ( "binlog: table %s: skipped at tid " INT64_FMT " and max binlog tid " INT64_FMT,
				tIndex.m_sName.cstr(), tIndex.m_iPreReplayTID, tIndex.m_iMaxTID );
		}
	}

	sphInfo ( "binlog: replay stats: %d commits, %d tables", dTotal[ADD_TXN], dTotal[ADD_INDEX] );
	sphInfo ( "binlog: finished replaying %s; %d.%d MB in %d.%03d sec",
		sLog.cstr(),
		(int)(iFileSize/1048576), (int)((iFileSize*10/1048576)%10),
		(int)(tmReplay/1000000), (int)((tmReplay/1000)%1000) );

	// only one operation, that is Add Cache - by the fact, empty binlog
	return ( bHaveCacheOp && dTotal[TOTAL]==1 ) ? BinlogFileState_e::ERROR_ABANDONED : BinlogFileState_e::OK;
}
// Replay one ADD_INDEX record: verify CRC, sequential id and name uniqueness,
// then register the index in tLog and attach the live CSphIndex (if any).
bool Binlog_c::ReplayIndexAdd ( BinlogReplayFileDesc_t & tLog, const SmallStringHash_T<CSphIndex*> & hIndexes, BinlogReader_c & tReader ) const NO_THREAD_SAFETY_ANALYSIS
{
	// load and check index
	const int64_t iTxnPos = tReader.GetPos(); // that is purely for reporting anomalies
	uint64_t uVal = tReader.UnzipOffset();

	// load data
	CSphString sName = tReader.GetZString();
	if ( !tReader.CheckCrc ( "indexadd", sName.cstr(), 0, iTxnPos ) )
		return false;

	// the recorded id must be exactly the next slot in our replay list
	if ( (int) uVal!=tLog.m_dIndexInfos.GetLength () )
	{
		Log ( REPLAY_IGNORE_TRX_ERROR,
				  "binlog: indexadd: unexpected table id (id=" UINT64_FMT ", expected=%d, pos=" INT64_FMT ")",
				  uVal, tLog.m_dIndexInfos.GetLength (), iTxnPos );
		return false;
	}

	// check for index name dupes
	ARRAY_FOREACH ( i, tLog.m_dIndexInfos )
	{
		if ( tLog.m_dIndexInfos[i].m_sName == sName )
		{
			Log ( REPLAY_IGNORE_TRX_ERROR, "binlog: duplicate table name (name=%s, dupeid=%d, pos=" INT64_FMT ")",
				sName.cstr(), i, iTxnPos );
			return false;
		}
	}

	// not a dupe, lets add
	BinlogIndexReplayInfo_t & tIndex = tLog.m_dIndexInfos.Add();
	tIndex.m_sName = sName;

	// lookup index in the list of currently served ones
	CSphIndex ** ppIndex = hIndexes ( sName.cstr() );
	CSphIndex * pIndex = ppIndex ? (*ppIndex) : nullptr;
	if ( pIndex )
	{
		tIndex.m_pIndex = pIndex;
		tIndex.m_iPreReplayTID = pIndex->m_iTID;
		tIndex.m_iFlushedTID = pIndex->m_iTID;
	}

	// all ok
	// TID ranges will be now recomputed as we replay
	return true;
}
// Replay the trailing ADD_CACHE record and cross-check its per-index summary
// against what we actually replayed; mismatches are reported as warnings only.
bool Binlog_c::ReplayCacheAdd ( const BinlogReplayFileDesc_t & tLog, DWORD uVersion, BinlogReader_c & tReader ) const NO_THREAD_SAFETY_ANALYSIS
{
	const int64_t iTxnPos = tReader.GetPos();

	// check data
	int iCache = tReader.UnzipOffset (); // FIXME! sanity check
	if ( m_bWrongVersion && iCache )
	{
		Log ( REPLAY_IGNORE_TRX_ERROR, "binlog: log %s is v.%d, binary is v.%d; recovery requires previous binary version", tReader.GetFilename().cstr(), uVersion, BINLOG_VERSION );
		return false;
	}

	BinlogIndexInfo_t tCache;
	for ( int i = 0; i<iCache; ++i )
	{
		// always consume the serialized entry so the reader position (and CRC) stay in sync
		tCache.m_sName = tReader.GetZString ();
		tCache.m_iMinTID = tReader.UnzipOffset ();
		tCache.m_iMaxTID = tReader.UnzipOffset ();
		tCache.m_iFlushedTID = tReader.UnzipOffset ();

		// FIX: the cache may list more tables than we replayed (the count mismatch
		// is warned about below); don't index m_dIndexInfos out of bounds here
		if ( i>=tLog.m_dIndexInfos.GetLength() )
			continue;

		const BinlogIndexInfo_t & tIndex = tLog.m_dIndexInfos[i];
		if ( tCache.m_sName!=tIndex.m_sName )
		{
			sphWarning ( "binlog: cache mismatch: table %d name mismatch (%s cached, %s replayed)",
				i, tCache.m_sName.cstr (), tIndex.m_sName.cstr () );
			continue;
		}

		if ( tCache.m_iMinTID!=tIndex.m_iMinTID || tCache.m_iMaxTID!=tIndex.m_iMaxTID )
		{
			sphWarning ( "binlog: cache mismatch: table %s tid ranges mismatch (cached " INT64_FMT " to " INT64_FMT ", replayed " INT64_FMT " to " INT64_FMT ")",
				tCache.m_sName.cstr (), tCache.m_iMinTID, tCache.m_iMaxTID, tIndex.m_iMinTID, tIndex.m_iMaxTID );
		}
	}

	if ( !tReader.CheckCrc ( "cache", "", 0, iTxnPos ) )
		return false;

	// if we arrived here by replay, let's verify everything
	// note that cached infos just passed checksumming, so the file is supposed to be clean!
	// in any case, broken log or not, we probably managed to replay something
	// so let's just report differences as warnings
	if ( iCache!=tLog.m_dIndexInfos.GetLength() )
		sphWarning ( "binlog: cache mismatch: %d tables cached, %d replayed", iCache, tLog.m_dIndexInfos.GetLength() );

	return true;
}
//////////////////////////////////////////////////////////////////////////
/// custom replay stuff
//////////////////////////////////////////////////////////////////////////

// helper used in about all replay ops:
// read an index id off the stream and validate it against the known range
int Binlog_c::ReplayIndexID ( CSphReader & tReader, const BinlogReplayFileDesc_t & tLog ) const
{
	const int64_t iPos = tReader.GetPos();
	const int iID = (int)tReader.UnzipOffset();
	const int iKnown = tLog.m_dIndexInfos.GetLength();

	if ( iID>=0 && iID<iKnown )
		return iID;

	Log ( REPLAY_IGNORE_TRX_ERROR, "binlog: unexpected table id (id=%d, max=%d, pos=" INT64_FMT ")", iID, iKnown, iPos );
	return -1;
}
namespace {
// Consume uSize bytes from the reader in bounded chunks,
// using a small scratch buffer (at most 4K).
void SkipBytes ( CSphAutoreader& tReader, DWORD uSize )
{
	const DWORD uChunk = Min ( uSize, 4096 );
	CSphFixedVector<BYTE> dScratch { uChunk };
	while ( uSize>0 )
	{
		const DWORD uNow = Min ( uSize, uChunk );
		tReader.GetBytes ( dScratch.begin (), uNow );
		uSize -= uNow;
	}
}
}
// Replay one ADD_TXN record. Transactions belonging to deleted indexes or
// already-applied TIDs are skipped (blob consumed by size); otherwise the
// index's own ReplayTxn is invoked with a validation callback that checks
// CRC and TID monotonicity before the payload is applied.
bool Binlog_c::ReplayTxn ( const BinlogReplayFileDesc_t & tLog, BinlogReader_c & tReader ) const NO_THREAD_SAFETY_ANALYSIS
{
	// load and lookup index
	const int64_t iTxnPos = tReader.GetPos();
	int iIdx = ReplayIndexID ( tReader, tLog );
	if ( iIdx==-1 )
		return false;

	BinlogIndexReplayInfo_t & tIndex = tLog.m_dIndexInfos[iIdx];

	// load transaction data
	auto iTID = (int64_t) tReader.UnzipOffset();
	auto uSize = tReader.GetDword();

	// skip txns of non-existent (deleted) indexes (skip blobs by size)
	if ( !tIndex.m_pIndex || iTID<=tIndex.m_pIndex->m_iTID || tIndex.m_pIndex->m_iTID==-1 )
	{
		// still account the TID range for the meta/cache bookkeeping
		tIndex.m_iMinTID = Min ( tIndex.m_iMinTID, iTID );
		tIndex.m_iFlushedTID = tIndex.m_iMaxTID = Max ( tIndex.m_iMaxTID, iTID );

		// just skip the blob
		SkipBytes ( tReader, uSize );

		// checksum
		return !tReader.GetErrorFlag () && tReader.CheckCrc ( "skip", tIndex.m_sName.cstr (), iTID, iTxnPos );
	}

	assert ( tIndex.m_pIndex );
	CSphString sError;
	BYTE uOp = tReader.GetByte();
	CSphString sOp = SzTxnName ( (Txn_e) uOp );
	// the callback decides validity (CRC + monotone TID) and whether to apply
	CheckTnxResult_t tReplayed = tIndex.m_pIndex->ReplayTxn ( tReader, sError, uOp, [ iTxnPos, iTID, this, &tReader, &tIndex, &sOp ] {
		CheckTnxResult_t tRes;
		tRes.m_bValid = PerformChecks ( sOp.cstr (), tIndex, iTID, iTxnPos, tReader );
		if ( !tRes.m_bValid )
			return tRes;

		// only replay transaction when index exists and does not have it yet (based on TID)
		if ( tIndex.m_pIndex && iTID>tIndex.m_pIndex->m_iTID )
		{
			tRes.m_bApply = true;
			// we normally expect per-index TIDs to be sequential
			// but let's be graceful about that
			if ( iTID!=tIndex.m_pIndex->m_iTID+1 )
				sphWarning (
						"binlog: %s: unexpected tid (table=%s, indextid=" INT64_FMT ", logtid=" INT64_FMT ", pos=" INT64_FMT ")",
						sOp.cstr (), tIndex.m_sName.cstr (), tIndex.m_pIndex->m_iTID, iTID, iTxnPos );
		}
		return tRes;
	});

	// could be invalid TXN in binlog
	if ( !tReplayed.m_bValid )
	{
		Log ( REPLAY_IGNORE_TRX_ERROR, "binlog: %s (table=%s, lasttid=" INT64_FMT ", logtid=" INT64_FMT ", pos=" INT64_FMT ", error=%s)",
			sOp.cstr (), tIndex.m_sName.cstr(), tIndex.m_iMaxTID, iTID, iTxnPos, sError.cstr() );
		return false;
	}

	// could be TXN in binlog that index already has should not apply again that TXN and should not change index TID by that TXN
	if ( tReplayed.m_bApply )
	{
		// update committed tid on replay in case of unexpected / mismatched tid
		tIndex.m_pIndex->m_iTID = iTID;
	}

	// update per-index replay TID bounds
	tIndex.m_iMinTID = Min ( tIndex.m_iMinTID, iTID );
	tIndex.m_iMaxTID = Max ( tIndex.m_iMaxTID, iTID );
	return true;
}
// Swap in a new disabled-state value, returning the previous one (test hook).
bool Binlog_c::MockDisabled ( bool bNewVal )
{
	const bool bPrev = m_bDisabled;
	m_bDisabled = bNewVal;
	return bPrev;
}
// Validate a replayed record: io state, CRC, and TID monotonicity.
// Logs (and possibly dies, per replay flags) on a descending TID.
bool Binlog_c::PerformChecks ( const char * szOp, BinlogIndexInfo_t & tIndex, int64_t iTID, int64_t iTxnPos, BinlogReader_c & tReader ) const
{
	// io error first; CRC only checked on a clean reader
	if ( tReader.GetErrorFlag () )
		return false;
	if ( !tReader.CheckCrc ( szOp, tIndex.m_sName.cstr (), iTID, iTxnPos ) )
		return false;

	// TIDs within one binlog must never decrease
	if ( iTID>=tIndex.m_iMaxTID )
		return true;

	Log ( REPLAY_IGNORE_TRX_ERROR, "binlog: %s: descending tid (table=%s, lasttid=" INT64_FMT ", logtid=" INT64_FMT ", pos=" INT64_FMT ")",
		szOp, tIndex.m_sName.cstr (), tIndex.m_iMaxTID, iTID, iTxnPos );
	return false;
}
// process-wide flag: RT changes are blocked between Binlog::Init and the end of Binlog::Replay
static auto & g_bRTChangesAllowed = RTChangesAllowed();
// Create the global binlog singleton and validate the binlog path.
// Blocks RT changes until Replay() re-enables them.
void Binlog::Init ( CSphString sBinlogPath )
{
	MEMORY ( MEM_BINLOG );

	g_bRTChangesAllowed = false;
	g_pRtBinlog.reset ( new Binlog_c );
	if ( !g_pRtBinlog )
		sphDie ( "binlog: failed to create binlog" );
	// check binlog path before detaching from the console - since we call sphDie on failure, and it should be visible.
	g_pRtBinlog->CheckAndSetPath ( std::move ( sBinlogPath ) );
}
// Forward the common-binlog mode switch to the singleton (must exist).
void Binlog::SetCommon ( bool bCommonBinlog )
{
	assert ( g_pRtBinlog );
	g_pRtBinlog->SetCommon ( bCommonBinlog );
}
// Forward config parsing to the singleton (must exist).
void Binlog::Configure ( const CSphConfigSection & hSearchd, DWORD uReplayFlags )
{
	assert ( g_pRtBinlog );
	g_pRtBinlog->Configure ( hSearchd, uReplayFlags );
}
// Destroy the singleton (runs ~Binlog_c: deinit binlogs, release the dir lock).
void Binlog::Deinit ()
{
	g_pRtBinlog.reset();
}
// Replay all saved binlogs, then re-enable RT changes (blocked since Init).
void Binlog::Replay ( const SmallStringHash_T<CSphIndex*> & hIndexes, ProgressCallbackSimple_t * pfnProgressCallback )
{
	MEMORY ( MEM_BINLOG );
	g_pRtBinlog->Replay ( hIndexes, pfnProgressCallback );
	g_bRTChangesAllowed = true;
}
// True when the binlog singleton exists and reports itself active.
bool Binlog::IsActive()
{
	return g_pRtBinlog && g_pRtBinlog->IsActive();
}
// Test hook: flip the disabled flag on the singleton; without one, echo the new value.
bool Binlog::MockDisabled ( bool bNewVal )
{
	return g_pRtBinlog ? g_pRtBinlog->MockDisabled ( bNewVal ) : bNewVal;
}
// Log a commit; a missing singleton or a suppressed TID (-1) is a successful no-op.
bool Binlog::Commit ( int64_t * pTID, const char* szIndexName, CSphString & sError, FnWriteCommit && fnSaver )
{
	if ( !g_pRtBinlog || *pTID==-1 )
		return true;
	return g_pRtBinlog->BinlogCommit ( pTID, szIndexName, std::move (fnSaver), sError );
}
// Forward a flush notification; no-op without a singleton or for TID -1.
void Binlog::NotifyIndexFlush ( int64_t iTID, const char * szIndexName, Shutdown_e eShutdown, ForceSave_e eAction )
{
	if ( g_pRtBinlog && iTID!=-1 )
		g_pRtBinlog->NotifyIndexFlush ( iTID, szIndexName, eShutdown, eAction );
}
// Binlog directory path, or an empty string when there is no singleton.
CSphString Binlog::GetPath()
{
	if ( !g_pRtBinlog )
		return "";
	return g_pRtBinlog->GetLogPath();
}
// Highest TID known for sIndex, or 0 without a singleton.
int64_t Binlog::LastTidFor ( const CSphString & sIndex )
{
	return g_pRtBinlog ? g_pRtBinlog->LastTidFor ( sIndex ) : 0;
}
// True when periodic flushing should run (singleton exists and allows it).
bool Binlog::IsFlushEnabled ()
{
	return g_pRtBinlog && g_pRtBinlog->IsFlushingEnabled ();
}
// Run a flush pass on the singleton if one exists.
void Binlog::Flush ()
{
	if ( g_pRtBinlog )
		g_pRtBinlog->DoFlush ();
}
// Next flush due time, or -1 when there is no singleton.
int64_t Binlog::NextFlushTimestamp ()
{
	return g_pRtBinlog ? g_pRtBinlog->NextFlushingTime () : -1;
}
// Report a replay anomaly. When the corresponding replay flag is NOT set,
// the anomaly is fatal (sphDieVa terminates; the exit(1) is a belt-and-braces
// fallback); otherwise it is logged as a warning.
void Binlog_c::Log ( DWORD uFlag, const char * sTemplate, ... ) const
{
	va_list ap;
	va_start ( ap, sTemplate );

	if ( ( m_uReplayFlags & uFlag )==0 )
	{
		sphDieVa ( sTemplate, ap );
		exit ( 1 );
	} else if ( g_eLogLevel>=SPH_LOG_WARNING )
	{
		sphLogVa ( sTemplate, ap, SPH_LOG_WARNING );
	}

	va_end ( ap );
}
| 56,714
|
C++
|
.cpp
| 1,621
| 32.510179
| 175
| 0.695453
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,932
|
sphinxexcerpt.cpp
|
manticoresoftware_manticoresearch/src/sphinxexcerpt.cpp
|
//
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "sphinxexcerpt.h"
#include "sphinxutils.h"
#include "sphinxsearch.h"
#include "sphinxquery.h"
#include "fileutils.h"
#include "sphinxstem.h"
#include "coroutine.h"
#include "memio.h"
#include "snippetfunctor.h"
#include "snippetindex.h"
#include "snippetstream.h"
#include "snippetpassage.h"
#include "stripper/html_stripper.h"
#include "tokenizer/tokenizer.h"
#include "dict/dict_base.h"
#include <math.h>
// Find szMacro inside sSrc; when present, split sSrc around it:
// sSrc keeps the text before the macro, sPost receives the text after it.
// Returns true when the macro was found and the split was performed.
static bool TransformMacro ( CSphString & sSrc, CSphString & sPost, const char * szMacro )
{
	// FIX: nullptr instead of NULL, per modern C++ convention
	const char * sPass = nullptr;
	if ( !sSrc.IsEmpty() )
		sPass = strstr ( sSrc.cstr(), szMacro );

	if ( !sPass )
		return false;

	int iSrcLen = sSrc.Length();
	auto iPassLen = (int)strlen(szMacro);
	int iTailLen = iSrcLen - iPassLen - int ( sPass - sSrc.cstr() );

	// copy tail
	if ( iTailLen )
		sPost.SetBinary ( sPass+iPassLen, iTailLen );

	// keep only the head (text before the macro) in sSrc
	CSphString sPre;
	sPre.SetBinary ( sSrc.cstr(), int ( sPass - sSrc.cstr() ) );
	sSrc.Swap ( sPre );

	return true;
}
// Apply both passage-id macros; deliberately no short-circuiting,
// since each call must run even when the previous one matched.
static bool SnippetTransformPassageMacros ( CSphString & sSrc, CSphString & sPost )
{
	bool bFound = TransformMacro ( sSrc, sPost, "%PASSAGE_ID%");
	bFound |= TransformMacro ( sSrc, sPost, "%SNIPPET_ID%");
	return bFound;
}
//////////////////////////////////////////////////////////////////////////

// Append the non-default limit settings to tOut, each prefixed with szPrefix.
void SnippetLimits_t::Format ( StringBuilder_c & tOut, const char * szPrefix ) const
{
	SnippetLimits_t tDefault;

	if ( m_iLimit!=tDefault.m_iLimit )					tOut.Appendf ( "%slimit=%d", szPrefix, m_iLimit );
	if ( m_iLimitWords!=tDefault.m_iLimitWords )		tOut.Appendf ( "%slimit_words=%d", szPrefix, m_iLimitWords );
	if ( m_iLimitPassages!=tDefault.m_iLimitPassages )	tOut.Appendf ( "%slimit_snippets=%d", szPrefix, m_iLimitPassages );
}
/////////////////////////////////////////////////////////////////////////////
/// Strips passage macros from the before/after match markers;
/// the flags remember whether each marker actually contained one.
void SnippetQuerySettings_t::Setup()
{
	m_bHasAfterPassageMacro = SnippetTransformPassageMacros ( m_sAfterMatch, m_sAfterMatchPassage );
	m_bHasBeforePassageMacro = SnippetTransformPassageMacros ( m_sBeforeMatch, m_sBeforeMatchPassage );
}
// serializes the settings that differ from their defaults into a brace-delimited string
// (emission order is fixed; do not reorder the checks below)
CSphString SnippetQuerySettings_t::AsString() const
{
	SnippetQuerySettings_t tDefault;
	StringBuilder_c tOut;
	tOut.StartBlock ( dJsonObj );

	// shared limit fields first (no prefix), then individual options
	SnippetLimits_t::Format ( tOut, "" );

	if ( m_sBeforeMatch!=tDefault.m_sBeforeMatch ) tOut.Appendf ( "before_match='%s'", m_sBeforeMatch.cstr() );
	if ( m_sAfterMatch!=tDefault.m_sAfterMatch ) tOut.Appendf ( "after_match='%s'", m_sAfterMatch.cstr() );
	if ( m_sChunkSeparator!=tDefault.m_sChunkSeparator ) tOut.Appendf ( "snippet_separator='%s'",m_sChunkSeparator.cstr() );
	if ( m_sFieldSeparator!=tDefault.m_sFieldSeparator ) tOut.Appendf ( "field_separator='%s'", m_sFieldSeparator.cstr() );
	if ( m_sStripMode!=tDefault.m_sStripMode ) tOut.Appendf ( "html_strip_mode='%s'", m_sStripMode.cstr() );
	if ( m_iAround!=tDefault.m_iAround ) tOut.Appendf ( "around=%d", m_iAround );
	if ( m_iPassageId!=tDefault.m_iPassageId ) tOut.Appendf ( "start_snippet_id=%d", m_iPassageId );
	if ( m_bUseBoundaries!=tDefault.m_bUseBoundaries ) tOut.Appendf ( "use_boundaries=%d", m_bUseBoundaries ? 1 : 0 );
	if ( m_bWeightOrder!=tDefault.m_bWeightOrder ) tOut.Appendf ( "weight_order=%d", m_bWeightOrder ? 1 : 0 );
	if ( m_bForceAllWords!=tDefault.m_bForceAllWords ) tOut.Appendf ( "force_all_words=%d", m_bForceAllWords ? 1 : 0 );
	if ( m_bAllowEmpty!=tDefault.m_bAllowEmpty ) tOut.Appendf ( "allow_empty=%d", m_bAllowEmpty ? 1 : 0 );
	if ( m_bEmitZones!=tDefault.m_bEmitZones ) tOut.Appendf ( "emit_zones=%d", m_bEmitZones ? 1 : 0 );
	if ( m_bForcePassages!=tDefault.m_bForcePassages ) tOut.Appendf ( "force_snippets=%d", m_bForcePassages ? 1 : 0 );
	if ( m_bJsonQuery!=tDefault.m_bJsonQuery ) tOut.Appendf ( "json_query=%d", m_bJsonQuery ? 1 : 0 );
	if ( m_ePassageSPZ!=tDefault.m_ePassageSPZ ) tOut.Appendf ( "snippet_boundary='%s'", PassageBoundarySz(m_ePassageSPZ) );
	if ( m_bPackFields!=tDefault.m_bPackFields ) tOut.Appendf ( "pack_fields=%d", m_bPackFields ? 1 : 0 );
	if ( m_bLimitsPerField!=tDefault.m_bLimitsPerField ) tOut.Appendf ( "limits_per_field=%d", m_bLimitsPerField ? 1 : 0 );

	// per-field limit overrides get a "__<field>_" prefix
	for ( const auto& tPerFieldLimit: m_hPerFieldLimits )
	{
		CSphString sPrefix;
		sPrefix.SetSprintf ( "__%s_", tPerFieldLimit.first.cstr() );
		tPerFieldLimit.second.Format ( tOut, sPrefix.cstr() );
	}

	// m_uFilesMode is a bitmask: bit 0 = load_files, bit 1 = load_files_scattered
	if ( m_uFilesMode!=tDefault.m_uFilesMode )
	{
		if ( m_uFilesMode & 1 ) tOut << "load_files=1";
		if ( m_uFilesMode & 2 ) tOut << "load_files_scattered=1";
	}

	tOut.FinishBlock(false);
	return tOut.cstr();
}
/////////////////////////////////////////////////////////////////////////////
// masks for unpacking ZonePacked_t values ( position:32 | sibling:16 | zone:16 )
#define UINT32_MASK 0xffffffffUL
#define UINT16_MASK 0xffff

/// maps a zone id found in the document to the index of the same zone in the query
struct DocQueryZonePair_t
{
	int m_iDoc;		// zone id as numbered within the document
	int m_iQuery;	// index of that zone in the query zone list
	bool operator<( const DocQueryZonePair_t & b ) const { return m_iDoc<b.m_iDoc; }
	bool operator>( const DocQueryZonePair_t & b ) const { return m_iDoc>b.m_iDoc; }
	bool operator==( const DocQueryZonePair_t & b ) const { return m_iDoc==b.m_iDoc; }
};
/// hit-in-zone check implementation for the matching engine;
/// precomputes, per query zone, the sorted lists of open/close positions found in the document
class SnippetZoneChecker_c : public ISphZoneCheck
{
public:
	/// dDocZones - packed zones found in the document ( position:32 | sibling:16 | zone:16 )
	/// hDocNames - document zone name -> document zone id
	/// dQueryZones - zone names referenced by the query
	SnippetZoneChecker_c ( const CSphVector<ZonePacked_t> & dDocZones, const SmallStringHash_T<int> & hDocNames, const StrVec_t & dQueryZones )
	{
		if ( !dQueryZones.GetLength() )
			return;

		// pair up each query zone with the matching document zone id (if present)
		CSphVector<DocQueryZonePair_t> dCheckedZones;
		ARRAY_FOREACH ( i, dQueryZones )
		{
			int * pZone = hDocNames ( dQueryZones[i] );
			if ( pZone )
			{
				DocQueryZonePair_t & tPair = dCheckedZones.Add ();
				tPair.m_iDoc = *pZone;
				tPair.m_iQuery = i;
			}
		}

		// sorted by document zone id for the BinarySearch below
		dCheckedZones.Sort();
		m_dZones.Resize ( dQueryZones.GetLength() );

		ARRAY_FOREACH ( i, dDocZones )
		{
			// unpack: position (high 32 bits), sibling index, zone id (low 16 bits)
			uint64_t uZonePacked = dDocZones[i];
			DWORD uPos = (DWORD)( ( uZonePacked >>32 ) & UINT32_MASK );
			int iSibling = (int)( ( uZonePacked>>16 ) & UINT16_MASK );
			int iZone = (int)( uZonePacked & UINT16_MASK );
			assert ( iSibling>=0 && iSibling<dDocZones.GetLength() );
			assert ( iZone==(int)( dDocZones[iSibling] & UINT16_MASK ) );

			// skip cases:
			// + close zone (tSpan.m_iSibling<i) - skipped
			// + open without close zone (tSpan.m_iSibling==i) - skipped
			// + open zone position > close zone position
			// + zone type not in query zones
			if ( iSibling<=i || uPos>=( ( dDocZones[iSibling]>>32 ) & UINT32_MASK ) )
				continue;

			DocQueryZonePair_t tRefZone;
			tRefZone.m_iDoc = iZone;
			const DocQueryZonePair_t * pPair = dCheckedZones.BinarySearch ( tRefZone );
			if ( !pPair )
				continue;

			// record the open/close span under the query zone index
			uint64_t uClosePacked = dDocZones[iSibling];
			DWORD uClosePos = ( (int)( uClosePacked>>32 ) & UINT32_MASK );

			ZoneHits_t & tZone = m_dZones[pPair->m_iQuery];
			tZone.m_dStarts.Add ( uPos );
			tZone.m_dEnds.Add ( uClosePos );
		}

#ifndef NDEBUG
		// sanity: start/end lists stay in strictly ascending order (required by FindSpan)
		ARRAY_FOREACH ( i, m_dZones )
		{
			const ZoneHits_t & tZone = m_dZones[i];
			assert ( tZone.m_dStarts.GetLength()==tZone.m_dEnds.GetLength() );
			const Hitpos_t * pHit = tZone.m_dStarts.Begin()+1;
			const Hitpos_t * pMax = tZone.m_dStarts.Begin()+tZone.m_dStarts.GetLength();
			for ( ; pHit<pMax; pHit++ )
				assert ( pHit[-1]<pHit[0] );

			pHit = tZone.m_dEnds.Begin()+1;
			pMax = tZone.m_dEnds.Begin()+tZone.m_dEnds.GetLength();
			for ( ; pHit<pMax; pHit++ )
				assert ( pHit[-1]<pHit[0] );
		}
#endif
	}

	/// true if the hit position falls inside any recorded span of query zone iZone
	SphZoneHit_e IsInZone ( int iZone, const ExtHit_t * pHit, int * pLastSpan ) final
	{
		DWORD uPosWithField = HITMAN::GetPosWithField ( pHit->m_uHitpos );
		int iOpen = FindSpan ( m_dZones[iZone].m_dStarts, uPosWithField );
		if ( pLastSpan )
			* pLastSpan = iOpen;
		return ( iOpen>=0 && uPosWithField<=m_dZones[iZone].m_dEnds[iOpen] ) ? SPH_ZONE_FOUND : SPH_ZONE_NO_SPAN;
	}

private:
	CSphVector<ZoneHits_t> m_dZones;	// per query zone: parallel start/end position lists
};
//////////////////////////////////////////////////////////////////////////
/// snippets query words for different cases;
/// fakes a query word over a single in-memory document using precollected hit positions
class SnippetsFastQword_c : public ISphQword
{
public:
	explicit SnippetsFastQword_c ( const CSphVector<DWORD> * pHits )
		: m_pHits ( pHits )
		, m_uLastPos ( 0 )
	{}

	/// resets iteration state; uLastPos marks the document-final hit position
	void Setup ( DWORD uLastPos )
	{
		m_iDocs = 0;
		m_iHits = 0;
		m_uLastPos = uLastPos;
		if ( m_pHits && m_pHits->GetLength() )
		{
			m_iDocs = 1;	// there is always exactly one (virtual) document
			m_iHits = m_pHits->GetLength();
			m_uMatchHits = 0;
			m_bHasHitlist = true;
		}
	}

	// any hits left to iterate? (m_uMatchHits doubles as the cursor)
	bool HasHits () const
	{
		return m_pHits && m_uMatchHits<(DWORD)m_pHits->GetLength();
	}

	/// returns rowid 0 exactly once (when hits exist), then INVALID_ROWID
	const CSphMatch & GetNextDoc() override
	{
		m_dQwordFields.SetAll();
		m_tMatch.m_tRowID = m_tMatch.m_tRowID==INVALID_ROWID && HasHits() ? 0 : INVALID_ROWID;
		return m_tMatch;
	}

	/// yields the next stored hit; flags the hit that ends the document
	Hitpos_t GetNextHit () override
	{
		if ( !HasHits() )
			return EMPTY_HIT;
		DWORD uPosition = *( m_pHits->Begin() + m_uMatchHits++ );
		return HITMAN::Create ( HITMAN::GetField(uPosition), HITMAN::GetPos(uPosition), (m_uLastPos==uPosition) );
	}

	// hits live in memory; no disk hitlist to seek
	void SeekHitlist ( SphOffset_t ) override {}

private:
	const CSphVector<DWORD> * m_pHits;	// borrowed; owned by SnippetsDocIndex_c
	CSphMatch m_tMatch;
	DWORD m_uLastPos;					// position of the last hit in the document
};
/// snippets query word setup;
/// spawns SnippetsFastQword_c instances backed by the precollected per-keyword hitlists
class SnippetsFastQwordSetup_c : public ISphQwordSetup
{
public:
	explicit SnippetsFastQwordSetup_c ( const SnippetsDocIndex_c & tIndex )
		: m_tIndex ( tIndex )
	{}

	/// one fake qword per query keyword, fed from the doc index hitlist
	ISphQword * QwordSpawn ( const XQKeyword_t & tWord ) const final
	{
		return new SnippetsFastQword_c ( m_tIndex.GetHitlist ( tWord, m_pDict ) );
	}

	bool QwordSetup ( ISphQword * pQword ) const final
	{
		SnippetsFastQword_c * pWord = (SnippetsFastQword_c *)pQword;
		pWord->Setup ( m_tIndex.GetLastPos() );
		return true;
	}

	/// fulltext scan stub: a single virtual doc if any hits were collected
	ISphQword * ScanSpawn() const override
	{
		int iDocs = ( m_tIndex.GetDocHits().GetLength() ? 1 : 0 );
		return new QwordScan_c ( iDocs );
	}

private:
	const SnippetsDocIndex_c & m_tIndex;
};
/// order hit marks by their in-document position
inline bool operator < ( const SphHitMark_t & a, const SphHitMark_t & b )
{
	return b.m_uPosition > a.m_uPosition;
}
// picks the widest boundary marker implied by the SPZ flags:
// a sentence in the query implies SENTENCE, PARAGRAPH and ZONE all matter;
// a paragraph implies PARAGRAPH and ZONE; a zone implies ZONE only
static int ConvertSPZ ( DWORD eSPZ )
{
	if ( eSPZ & SPH_SPZ_SENTENCE )
		return MAGIC_CODE_SENTENCE;

	if ( eSPZ & SPH_SPZ_PARAGRAPH )
		return MAGIC_CODE_PARAGRAPH;

	return ( eSPZ & SPH_SPZ_ZONE ) ? MAGIC_CODE_ZONE : 0;
}
/// holds per-field tokenization cache streamers and their snippet limits;
/// the streamer pointers are heap-allocated by the caller and owned (freed) here
struct ScopedStreamers_t
{
public:
	CSphVector<CacheStreamer_i *> m_dStreamers;	// one owned streamer per field (may stay null)
	CSphVector<SnippetLimits_t> m_dLimits;		// effective limits per field

	explicit ScopedStreamers_t ( int iFields )
	{
		m_dStreamers.Resize(iFields);
		m_dStreamers.ZeroVec();
		m_dLimits.Resize(iFields);
	}

	// owns raw pointers: an implicit copy would double-delete the streamers
	ScopedStreamers_t ( const ScopedStreamers_t & ) = delete;
	ScopedStreamers_t & operator= ( const ScopedStreamers_t & ) = delete;

	~ScopedStreamers_t()
	{
		for ( auto & i : m_dStreamers )
			SafeDelete(i);
	}
};
//////////////////////////////////////////////////////////////////////////
// these fields are set once in Setup/SetQuery and are not changed during Build/PackResult,
// so they may be shared among clones
struct SnippetBuilderStatelessMembers_t
{
	const CSphIndex *				m_pIndex = nullptr;			// source index (borrowed)
	const SnippetQuerySettings_t *	m_pQuerySettings = nullptr;	// query options (borrowed)
	std::unique_ptr<CSphHTMLStripper> m_pStripper;				// html stripper, per strip mode
	std::unique_ptr<QueryParser_i>	m_pQueryParser;
	TokenizerRefPtr_c				m_pTokenizerJson;
	std::unique_ptr<XQQuery_t>		m_pExtQuery;				// parsed extended query tree
	DWORD							m_eExtQuerySPZ = SPH_SPZ_NONE;	// SPZ flags found in the query
	bool							m_bSetupCalled = false;		// guards against SetQuery before Setup
};
/// snippet builder implementation (pimpl); stateless parts are shared among clones,
/// per-clone parts (tokenizers, dict, field filter) are owned individually
class SnippetBuilder_c::Impl_c
{
public:
			Impl_c ();

	Impl_c*	MakeClone() const;
	void	Setup ( const CSphIndex * pIndex, const SnippetQuerySettings_t & tQuery );
	bool	SetQuery ( const CSphString & sQuery, bool bIgnoreFields, CSphString & sError );
	bool	Build ( std::unique_ptr<TextSource_i>& pSource, SnippetResult_t & tRes );
	CSphVector<BYTE> PackResult ( SnippetResult_t & tRes, const VecTraits_T<int> & dRequestedFields ) const;

private:
	// zones found in the document plus the zone name/position info
	struct ZoneData_t
	{
		CSphVector<ZonePacked_t>	m_dZones;
		FunctorZoneInfo_t			m_tInfo;
	};

	// passage id + weight, used to restore weight order when packing results
	struct WeightedPassage_t
	{
		int		m_iId = 0;
		int		m_iWeight = 0;
	};

	struct WeightedPassageSort_fn
	{
		bool IsLess ( const WeightedPassage_t & a, const WeightedPassage_t & b ) const;
	};

	SharedPtr_t<SnippetBuilderStatelessMembers_t> m_pState;	// shared, set-once state
	TokenizerRefPtr_c			m_pTokenizer;
	TokenizerRefPtr_c			m_pQueryTokenizer;
	DictRefPtr_c				m_pDict;
	std::unique_ptr<ISphFieldFilter> m_pFieldFilter;

	bool			CheckSettings ( CSphString & sError ) const;
	const CSphHTMLStripper * GetStripperForText() const;
	const CSphHTMLStripper * GetStripperForTokenization() const;

	// main highlighting pipeline and its stages
	bool			DoHighlighting ( TextSource_i & tSource, SnippetResult_t & tRes ) const;
	void			ExtractPassages ( ScopedStreamers_t & tStreamers, TextSource_i & tSource, const SnippetsDocIndex_c & tContainer, const CSphVector<SphHitMark_t> & dMarked, int iField,
						PassageContext_t & tContext, SnippetResult_t & tRes ) const;
	void			SelectBestPassages ( const SnippetsDocIndex_c & tContainer, const PassageContext_t & tContext, const SnippetLimits_t & tLimits, DWORD uFoundWords, CSphVector<Passage_t> & dPassages ) const;
	void			HighlightPassages ( ScopedStreamers_t & tStreamers, TextSource_i & tSource, CSphVector<Passage_t> & dPassages, const CSphVector<SphHitMark_t> & dMarked,
						const FunctorZoneInfo_t & tZoneInfo, SnippetResult_t & tRes ) const;
	void			HighlightAll ( ScopedStreamers_t & tStreamers, TextSource_i & tSource, const CSphVector<SphHitMark_t> & dMarked, int iField, SnippetResult_t & tRes ) const;
	void			HighlightFieldStart ( ScopedStreamers_t & tStreamers, TextSource_i & tSource, int iField, SnippetResult_t & tRes ) const;
	void			HighlightAnything ( ScopedStreamers_t & tStreamers, TextSource_i & tSource, SnippetResult_t & tRes ) const;
	void			CollectHits ( ScopedStreamers_t & tStreamers, TextSource_i & tSource, SnippetsDocIndex_c & tContainer, int iSPZ, DWORD & uFoundWords, ZoneData_t & tZodeData, SnippetResult_t & tRes ) const;
	void			MarkHits ( const SnippetsDocIndex_c & tContainer, CSphVector<SphHitMark_t> & dMarked, const ZoneData_t & tZoneData, SnippetResult_t & tRes ) const;
	void			SplitSpans ( const SnippetsDocIndex_c & tContainer, CSphVector<SphHitMark_t> & dMarked ) const;
	void			FoldHitsIntoSpans ( CSphVector<SphHitMark_t> & dMarked ) const;
	void			FixupQueryLimits ( SnippetLimits_t & tLimit, const SnippetsDocIndex_c & tContainer, DWORD uFoundTerms, CSphString & sWarning ) const;
	bool			CanHighlightAll ( int iDocLen, const SnippetLimits_t & tLimits ) const;
	bool			SetupStripperSPZ ( bool bSetupSPZ, CSphString & sError );
	void			CreateLimits ( ScopedStreamers_t & tStreamers, const TextSource_i & tSource, const SnippetsDocIndex_c & tContainer, DWORD uFoundWords, CSphString & sWarning ) const;

	// result packing helpers
	void			GetPassageOrder ( const FieldResult_t & tField, CSphVector<WeightedPassage_t> & dPassageOrder ) const;
	void			PackAsData ( MemoryWriter_c & tWriter, SnippetResult_t & tRes, const VecTraits_T<int> & dRequestedFields ) const;
	void			PackAsString ( MemoryWriter_c & tWriter, CSphVector<BYTE> & dRes, SnippetResult_t & tRes, const VecTraits_T<int> & dRequestedFields ) const;

private:
			Impl_c ( const Impl_c & rhs ); // used for cloning
};
/// runs the passage extractor over one field, collecting candidate passages into tContext
void SnippetBuilder_c::Impl_c::ExtractPassages ( ScopedStreamers_t & tStreamers, TextSource_i & tSource, const SnippetsDocIndex_c & tContainer, const CSphVector<SphHitMark_t> & dMarked, int iField, PassageContext_t & tContext,
	SnippetResult_t & tRes ) const
{
	assert ( m_pState->m_pIndex && m_pState->m_pQuerySettings );

	VecTraits_T<BYTE> dText = tSource.GetText(iField);
	auto szField = (const char*)dText.Begin();
	int iFieldLen = dText.GetLength();

	std::unique_ptr<TokenFunctor_i> pExtractor = CreatePassageExtractor ( tContainer, tContext, m_pTokenizer, *m_pState->m_pQuerySettings, tStreamers.m_dLimits[iField], m_pState->m_pIndex->GetSettings(), szField, iFieldLen, dMarked, iField, tRes );
	tStreamers.m_dStreamers[iField]->Tokenize ( *pExtractor );
}
void SnippetBuilder_c::Impl_c::SelectBestPassages ( const SnippetsDocIndex_c & tContainer, const PassageContext_t & tContext, const SnippetLimits_t & tLimits, DWORD uFoundWords, CSphVector<Passage_t> & dPassages ) const
{
CSphVector<Passage_t> dNewPassages = tContext.SelectBest ( tLimits, *m_pState->m_pQuerySettings, tContainer, uFoundWords );
Passage_t * pPassages = dPassages.AddN ( dNewPassages.GetLength() );
ARRAY_FOREACH ( i, dNewPassages )
pPassages[i] = std::move ( dNewPassages[i] );
}
/// 2nd pass: renders the selected passages field by field
void SnippetBuilder_c::Impl_c::HighlightPassages ( ScopedStreamers_t & tStreamers, TextSource_i & tSource, CSphVector<Passage_t> & dPassages, const CSphVector<SphHitMark_t> & dMarked,
	const FunctorZoneInfo_t & tZoneInfo, SnippetResult_t & tRes ) const
{
	// everything should already be highlighted in fields w/o matches
	if ( !dPassages.GetLength() )
		return;

	ARRAY_FOREACH ( iField, tStreamers.m_dStreamers )
	{
		const char * szDoc = (const char*)tSource.GetText(iField).Begin();
		int iDocLen = tSource.GetText(iField).GetLength();

		// only pass this field's passages down to the highlighter
		CSphVector<Passage_t*> dFilteredPassages;
		ARRAY_FOREACH ( iPassage, dPassages )
			if ( dPassages[iPassage].m_iField==iField )
				dFilteredPassages.Add ( &dPassages[iPassage] );

		if ( !dFilteredPassages.GetLength() )
			continue;

		std::unique_ptr<TokenFunctor_i> pHighlighter = CreatePassageHighlighter ( dFilteredPassages, m_pTokenizer,
			*m_pState->m_pQuerySettings, m_pState->m_pIndex->GetSettings(), szDoc, iDocLen, dMarked, tZoneInfo, iField, tRes );

		tStreamers.m_dStreamers[iField]->Tokenize ( *pHighlighter );
	}
}
/// runs the matching engine over the collected hits and fills dMarked with matching spans
void SnippetBuilder_c::Impl_c::MarkHits ( const SnippetsDocIndex_c & tContainer, CSphVector<SphHitMark_t> & dMarked, const ZoneData_t & tZoneData, SnippetResult_t & tRes ) const
{
	const XQQuery_t & tXQQuery = tContainer.GetQuery();

	SnippetZoneChecker_c tZoneChecker ( tZoneData.m_dZones, tZoneData.m_tInfo.m_hZones, tXQQuery.m_dZones );

	SnippetsFastQwordSetup_c tQwordSetup ( tContainer );
	tQwordSetup.SetDict ( m_pDict );
	tQwordSetup.m_pWarning = &tRes.m_sError;
	tQwordSetup.m_pZoneChecker = &tZoneChecker;

	// got a lot of stack allocated variables (up to 30K)
	// check that query not overflow stack here
	auto iStackNeed = ConsiderStack ( tXQQuery.m_pRoot, tRes.m_sError );
	if ( !iStackNeed )
		return;

	// evaluate on a coroutine with a stack big enough for the query tree depth
	Threads::Coro::Continue ( iStackNeed, [&] {
	std::unique_ptr<CSphHitMarker> pMarker ( CSphHitMarker::Create ( tXQQuery.m_pRoot, tQwordSetup ) );
	if ( !pMarker )
		return;

	pMarker->Mark(dMarked);
	});
}
/// breaks multi-word spans down to only the positions that match actual query keywords
void SnippetBuilder_c::Impl_c::SplitSpans ( const SnippetsDocIndex_c & tContainer, CSphVector<SphHitMark_t> & dMarked ) const
{
	const CSphVector<CSphVector<DWORD>> & dDocHits = tContainer.GetDocHits();

	// we just collected matching spans into dMarked, but!
	// certain spans might not match all words within the span
	// for instance, (one NEAR/3 two) could return a 5-word span
	// but we do have full matching keywords list in tContainer
	// so let's post-process and break down such spans
	// FIXME! what about phrase spans vs stopwords? they will be split now
	ARRAY_FOREACH ( i, dMarked )
	{
		// single-word spans need no splitting
		if ( dMarked[i].m_uSpan==1 )
			continue;

		// find positions within the span that hit any keyword's hitlist
		CSphVector<int> dMatched;
		for ( int j=0; j<(int)dMarked[i].m_uSpan; j++ )
		{
			// OPTIMZE? we can premerge all dochits vectors once
			const int iPos = dMarked[i].m_uPosition + j;
			for ( const auto & dDocHit : dDocHits )
				if ( dDocHit.BinarySearch(iPos) )
				{
					dMatched.Add(iPos);
					break;
				}
		}

		// this is something that must never happen
		// we got a span out of the matching engine that does not match any keywords?!
		assert ( dMatched.GetLength() );
		if ( !dMatched.GetLength() )
		{
			dMarked.RemoveFast ( i-- ); // remove, rescan
			continue;
		}

		// append all matching keywords as 1-long spans
		ARRAY_FOREACH ( j, dMatched )
		{
			SphHitMark_t & tMarked = dMarked.Add();
			tMarked.m_uPosition = dMatched[j];
			tMarked.m_uSpan = 1;
		}

		// this swaps current span with the last 1-long span we added
		// which is by definition okay; so we need not rescan it
		dMarked.RemoveFast ( i );
	}

	// sort and dedupe the resulting 1-long spans
	dMarked.Uniq();
}
/// merges adjacent single hits back into contiguous spans (in-place compaction)
void SnippetBuilder_c::Impl_c::FoldHitsIntoSpans ( CSphVector<SphHitMark_t> & dMarked ) const
{
	// we just exploded spans into actual matching hits
	// now lets fold marked and matched hits back into contiguous spans
	// so that we could highlight such spans instead of every individual word
	SphHitMark_t * pOut = dMarked.Begin(); // last emitted folded token
	SphHitMark_t * pIn = dMarked.Begin() + 1; // next token to process
	SphHitMark_t * pMax = dMarked.Begin() + dMarked.GetLength();

	while ( pIn<pMax )
	{
		// adjacent to the current output span? extend it; otherwise start a new one
		if ( pIn->m_uPosition==( pOut->m_uPosition + pOut->m_uSpan ) )
		{
			pOut->m_uSpan += pIn->m_uSpan;
			pIn++;
		} else
			*++pOut = *pIn++;
	}

	// shrink the vector to the folded span count (no-op for 0/1-element inputs)
	if ( dMarked.GetLength()>1 )
		dMarked.Resize ( pOut - dMarked.Begin() + 1 );
}
/// emits the whole field with every matching keyword highlighted (no passage selection)
void SnippetBuilder_c::Impl_c::HighlightAll ( ScopedStreamers_t & tStreamers, TextSource_i & tSource,
	const CSphVector<SphHitMark_t> & dMarked, int iField, SnippetResult_t & tRes ) const
{
	VecTraits_T<BYTE> dText = tSource.GetText(iField);

	std::unique_ptr<TokenFunctor_i> pFunctor = CreateQueryHighlighter ( m_pTokenizer, *m_pState->m_pQuerySettings,
		m_pState->m_pIndex->GetSettings(), (const char*)dText.Begin(), dText.GetLength(), dMarked, iField, tRes );

	tStreamers.m_dStreamers[iField]->Tokenize ( *pFunctor );
}
/// emits the beginning of a field that had no matches (unless allow_empty is set)
void SnippetBuilder_c::Impl_c::HighlightFieldStart ( ScopedStreamers_t & tStreamers, TextSource_i & tSource, int iField, SnippetResult_t & tRes ) const
{
	assert ( m_pState->m_pQuerySettings );
	const SnippetQuerySettings_t & tOpts = *m_pState->m_pQuerySettings;

	// with allow_empty nothing needs to be emitted for matchless fields
	if ( tOpts.m_bAllowEmpty )
		return;

	VecTraits_T<BYTE> dText = tSource.GetText(iField);
	int iResultCP = 0;
	std::unique_ptr<TokenFunctor_i> pFunctor = CreateDocStartHighlighter ( m_pTokenizer, tOpts, tStreamers.m_dLimits[iField],
		m_pState->m_pIndex->GetSettings(), (const char*)dText.Begin(), dText.GetLength(), iField, iResultCP, tRes );

	tStreamers.m_dStreamers[iField]->Tokenize ( *pFunctor );
}
/// fallback when nothing matched anywhere: emit the start of every field (unless allow_empty)
void SnippetBuilder_c::Impl_c::HighlightAnything ( ScopedStreamers_t & tStreamers, TextSource_i & tSource, SnippetResult_t & tRes ) const
{
	assert ( m_pState->m_pQuerySettings );
	const SnippetQuerySettings_t & tOpts = *m_pState->m_pQuerySettings;
	if ( tOpts.m_bAllowEmpty )
		return;

	int iResultCP = 0;
	int iFields = tSource.GetNumFields();
	for ( int iField = 0; iField<iFields; iField++ )
	{
		VecTraits_T<BYTE> dText = tSource.GetText(iField);
		std::unique_ptr<TokenFunctor_i> pFunctor = CreateDocStartHighlighter ( m_pTokenizer, tOpts, tStreamers.m_dLimits[iField], m_pState->m_pIndex->GetSettings(), (const char*)dText.Begin(), dText.GetLength(), iField, iResultCP, tRes );
		tStreamers.m_dStreamers[iField]->Tokenize ( *pFunctor );
	}
}
/// 1st pass: tokenizes each field, matches keywords, caches tokens and collects zones/hits
void SnippetBuilder_c::Impl_c::CollectHits ( ScopedStreamers_t & tStreamers, TextSource_i & tSource, SnippetsDocIndex_c & tContainer,
	int iSPZ, DWORD & uFoundWords, ZoneData_t & tZodeData, SnippetResult_t & tRes ) const
{
	assert ( m_pState->m_pIndex && m_pState->m_pQuerySettings );
	const CSphIndexSettings & tIndexSettings = m_pState->m_pIndex->GetSettings();
	const SnippetQuerySettings_t & tQuerySettings = *m_pState->m_pQuerySettings;

	for ( int iField = 0; iField < tSource.GetNumFields(); iField++ )
	{
		const char * szFieldName = tSource.GetFieldName(iField);
		tRes.m_dFields[iField].m_sName = szFieldName;
		tRes.m_dFields[iField].m_dPassages.Resize(1); // so that non-passage funcs will have something to work on

		const char * szDoc = (const char*)tSource.GetText(iField).Begin();
		int iDocLen = tSource.GetText(iField).GetLength();

		const CSphHTMLStripper * pStripper = GetStripperForTokenization();

		// do two passes over document
		// 1st pass will tokenize document, match keywords, and store positions into docindex
		// 2nd pass will highlight matching positions only (with some matching engine aid)

		// the streamer caches tokens so the 2nd pass can replay them without re-tokenizing
		CacheStreamer_i * pStreamer = CreateCacheStreamer(iDocLen);
		tStreamers.m_dStreamers[iField] = pStreamer;
		std::unique_ptr<HitCollector_i> pHitCollector = CreateHitCollector ( tContainer, m_pTokenizer, m_pDict, tQuerySettings, tIndexSettings, szDoc, iDocLen, iField, *pStreamer, tZodeData.m_dZones, tZodeData.m_tInfo, tRes );
		TokenizeDocument ( *pHitCollector, pStripper, iSPZ );

		// accumulate the bitmask of query words found so far (across all fields)
		uFoundWords |= pHitCollector->GetFoundWords();
	}
}
/// true if the whole document fits the limits and may be emitted without passage extraction
bool SnippetBuilder_c::Impl_c::CanHighlightAll ( int iDocLen, const SnippetLimits_t & tLimits ) const
{
	assert ( m_pState->m_pQuerySettings );
	const SnippetQuerySettings_t & tOpts = *m_pState->m_pQuerySettings;

	// passage boundaries always force passage extraction
	if ( tOpts.m_ePassageSPZ!=SPH_SPZ_NONE )
		return false;

	bool bFitsChars = ( tLimits.m_iLimit==0 || tLimits.m_iLimit>=iDocLen );
	bool bFitsWords = ( tLimits.m_iLimitWords==0 || tLimits.m_iLimitWords>iDocLen/2 );
	if ( !bFitsChars || !bFitsWords )
		return false;

	// passages may still be requested explicitly when any limit is set
	bool bAnyLimitSet = ( tLimits.m_iLimit!=0 || tLimits.m_iLimitWords!=0 || tLimits.m_iLimitPassages!=0 );
	return !( tOpts.m_bForcePassages && bAnyLimitSet );
}
/// fills per-field limits: global settings by default, per-field overrides where configured
void SnippetBuilder_c::Impl_c::CreateLimits ( ScopedStreamers_t & tStreamers, const TextSource_i & tSource, const SnippetsDocIndex_c & tContainer, DWORD uFoundWords, CSphString & sWarning ) const
{
	assert ( m_pState->m_pQuerySettings );
	const SnippetQuerySettings_t & tOpts = *m_pState->m_pQuerySettings;

	// per-field overrides apply only to text fetched from the index
	bool bPerField = tOpts.m_bLimitsPerField && tSource.TextFromIndex();

	for ( int iField = 0; iField<tSource.GetNumFields(); iField++ )
	{
		SnippetLimits_t & tLimits = tStreamers.m_dLimits[iField];
		tLimits = tOpts;

		if ( bPerField )
		{
			const SnippetLimits_t * pOverride = tOpts.m_hPerFieldLimits ( tSource.GetFieldName(iField) );
			if ( pOverride )
				tLimits = *pOverride;
		}

		FixupQueryLimits ( tLimits, tContainer, uFoundWords, sWarning );
	}
}
/// sets a bit for every field that got at least one marked hit
static void MarkFieldsWithHits ( CSphBitvec & dFieldsWithHits, const CSphVector<SphHitMark_t> & dMarked )
{
	for ( const SphHitMark_t & tMark : dMarked )
		dFieldsWithHits.BitSet ( HITMAN::GetField ( tMark.m_uPosition ) );
}
/// main highlighting pipeline: collect hits, run the matching engine,
/// then either highlight whole fields or extract+highlight the best passages
bool SnippetBuilder_c::Impl_c::DoHighlighting ( TextSource_i & tSource, SnippetResult_t & tRes ) const
{
	assert ( m_pState->m_pIndex && m_pState->m_pQuerySettings );
	const SnippetQuerySettings_t & tQuerySettings = *m_pState->m_pQuerySettings;

	// create query and hit lists container, parse query
	SnippetsDocIndex_c tContainer ( *m_pState->m_pExtQuery );
	tContainer.ParseQuery ( m_pDict, m_pState->m_eExtQuerySPZ );

	ScopedStreamers_t tStreamers ( tSource.GetNumFields() );

	int iTotalDocLen = 0;
	for ( int iField = 0; iField < tSource.GetNumFields(); iField++ )
		iTotalDocLen += tSource.GetText(iField).GetLength();

	// when the whole document fits the limits, passage extraction can be skipped
	bool bGlobalHighlightAll = CanHighlightAll ( iTotalDocLen, tQuerySettings );
	int iSPZ = ConvertSPZ ( m_pState->m_eExtQuerySPZ | ( bGlobalHighlightAll ? 0 : tQuerySettings.m_ePassageSPZ ) );

	ZoneData_t tZodeData;

	tRes.m_dFields.Resize ( tSource.GetNumFields() );

	// 1st pass: tokenize all fields, collect keyword hits and zones
	DWORD uFoundWords = 0;
	CollectHits ( tStreamers, tSource, tContainer, iSPZ, uFoundWords, tZodeData, tRes );

	for ( auto & i : tStreamers.m_dStreamers )
		i->SetZoneInfo ( tZodeData.m_tInfo );

	CreateLimits ( tStreamers, tSource, tContainer, uFoundWords, tRes.m_sWarning );

	// run the matching engine over the collected hits, then normalize spans
	CSphVector<SphHitMark_t> dMarked;
	MarkHits ( tContainer, dMarked, tZodeData, tRes );
	SplitSpans ( tContainer, dMarked );
	FoldHitsIntoSpans(dMarked);

	CSphBitvec dFieldsWithHits ( tSource.GetNumFields() );
	MarkFieldsWithHits ( dFieldsWithHits, dMarked );

	// nothing matched at all: fall back to emitting field starts (unless packing fields)
	if ( !dMarked.GetLength() && !tQuerySettings.m_bPackFields )
	{
		HighlightAnything ( tStreamers, tSource, tRes );
		return true;
	}

	// we either use global passage limits (and global tPassageContext)
	// or we create new context every time we highlight a field
	bool bPerFieldLimits = tSource.TextFromIndex() && tQuerySettings.m_bLimitsPerField;

	CSphVector<Passage_t> dPassages;
	PassageContext_t tGlobalPassageContext;
	for ( int iField = 0; iField < tSource.GetNumFields(); iField++ )
	{
		bool bHighlightAll = bPerFieldLimits ? CanHighlightAll ( tSource.GetText(iField).GetLength(), tStreamers.m_dLimits[iField] ) : bGlobalHighlightAll;
		if ( !dFieldsWithHits.BitGet(iField) )
		{
			// NOTE: this uses only per-field limits. no global limits here
			if ( tQuerySettings.m_bPackFields )
				HighlightFieldStart ( tStreamers, tSource, iField, tRes );
		}
		else if ( bHighlightAll )
			HighlightAll ( tStreamers, tSource, dMarked, iField, tRes );
		else
		{
			PassageContext_t tPerFieldContext;
			PassageContext_t & tCurContext = bPerFieldLimits ? tPerFieldContext : tGlobalPassageContext;
			ExtractPassages ( tStreamers, tSource, tContainer, dMarked, iField, tCurContext, tRes );

			if ( bPerFieldLimits )
				SelectBestPassages ( tContainer, tPerFieldContext, tStreamers.m_dLimits[iField], uFoundWords, dPassages );
		}
	}

	// with global limits, the best passages are picked once across all fields
	if ( !bPerFieldLimits )
		SelectBestPassages ( tContainer, tGlobalPassageContext, tQuerySettings, uFoundWords, dPassages );

	// 2nd pass: render the selected passages
	HighlightPassages ( tStreamers, tSource, dPassages, dMarked, tZodeData.m_tInfo, tRes );

	return true;
}
//////////////////////////////////////////////////////////////////////////
/// shared helper for text sources: applies field filter and/or html stripper to a buffer
class StringSourceTraits_c
{
public:
	/// bUseOriginal (in/out): true while dSourceText may be used as-is;
	/// cleared once the (possibly modified) text lands in dDestText
	void PrepareText ( const VecTraits_T<BYTE> & dSourceText, CSphVector<BYTE> & dDestText, ISphFieldFilter * pFilter,
		const CSphHTMLStripper * pStripper, bool & bUseOriginal ) const;
};
/// runs the field filter, then the stripper, over the text;
/// switches bUseOriginal off as soon as the text gets copied/modified into dDestText
void StringSourceTraits_c::PrepareText ( const VecTraits_T<BYTE> & dSourceText, CSphVector<BYTE> & dDestText,
	ISphFieldFilter * pFilter, const CSphHTMLStripper * pStripper, bool & bUseOriginal ) const
{
	// nothing to do; the original buffer stays in use
	if ( !pFilter && !pStripper )
		return;

	if ( pFilter )
	{
		int iGot;
		if ( bUseOriginal )
			iGot = pFilter->Apply ( dSourceText, dDestText, false );
		else
		{
			// dDestText is both source and destination here; filter via a copy to avoid aliasing
			CSphVector<BYTE> dTmp;
			dTmp.Append ( dSourceText );
			iGot = pFilter->Apply ( dTmp, dDestText, false );
		}

		// iGot==0 means the filter left the text unchanged
		if ( iGot )
		{
			dDestText.Resize(iGot);
			bUseOriginal = false;
		}
	}

	if ( pStripper )
	{
		// the stripper works in-place on dDestText, so make sure the text is there
		if ( bUseOriginal )
		{
			dDestText.Resize(0);
			dDestText.Append ( dSourceText );
			bUseOriginal = false;
		}

		// Strip() expects a null-terminated buffer
		if ( !( dDestText.IsEmpty () || dDestText.Last ()=='\0' ) )
			dDestText.Add('\0');

		if ( !dDestText.IsEmpty() )
		{
			pStripper->Strip ( dDestText.Begin() );
			// stripping shrinks the text; recompute its length (drops the terminator too)
			dDestText.Resize ( (int) strlen ( (const char*)dDestText.Begin() ) );
		}
	}
}
//////////////////////////////////////////////////////////////////////////
/// single-field text source over a caller-owned string buffer
class TextSourceString_c : public TextSource_i, public StringSourceTraits_c
{
public:
			TextSourceString_c() {}
			TextSourceString_c ( const VecTraits_T<const BYTE> & dString );

	bool	PrepareText ( ISphFieldFilter * pFilter, const CSphHTMLStripper * pStripper, CSphString & sError ) override;
	VecTraits_T<BYTE> GetText ( int iField ) const final;
	int		GetNumFields() const final { return 1; }
	const char * GetFieldName ( int iField ) const final { return ""; }
	bool	TextFromIndex() const final;

protected:
	VecTraits_T<BYTE>	m_dSourceText;	// this holds pointer to original text
	CSphVector<BYTE>	m_dBuffer;		// this holds text modified by filters/stripper (if any)
	bool				m_bUseOriginal = true;	// whether to use original or modified text
};
TextSourceString_c::TextSourceString_c ( const VecTraits_T<const BYTE> & dString )
	: m_dSourceText ( dString )
{}

/// applies filter/stripper; m_bUseOriginal flips to false if the text was modified
bool TextSourceString_c::PrepareText ( ISphFieldFilter * pFilter, const CSphHTMLStripper * pStripper, CSphString & sError )
{
	StringSourceTraits_c::PrepareText ( m_dSourceText, m_dBuffer, pFilter, pStripper, m_bUseOriginal );
	return true;
}

/// single-field source: iField must be 0
VecTraits_T<BYTE> TextSourceString_c::GetText ( int iField ) const
{
	assert ( !iField );
	if ( m_bUseOriginal )
		return m_dSourceText;

	return m_dBuffer;
}

// text comes from the caller, not from stored index fields
bool TextSourceString_c::TextFromIndex() const
{
	return false;
}
//////////////////////////////////////////////////////////////////////////
/// single-field text source that reads its text from a file (load_files mode)
class TextSourceFile_c : public TextSourceString_c
{
public:
	TextSourceFile_c ( const VecTraits_T<const BYTE> & dFilename );

	bool	PrepareText ( ISphFieldFilter * pFilter, const CSphHTMLStripper * pStripper, CSphString & sError ) final;

private:
	CSphString	m_sFile;	// file name as passed by the client (prefix is applied on load)

	bool	LoadFile ( CSphString & sError );
};
// dFilename is a raw (not null-terminated) byte range holding the file name
TextSourceFile_c::TextSourceFile_c ( const VecTraits_T<const BYTE> & dFilename )
{
	m_sFile.SetBinary ( (const char*)dFilename.Begin(), dFilename.GetLength() );
}
/// reads the whole snippet source file into m_dBuffer
/// (validation reordered: reject an empty name before building and escape-checking the path,
/// and never fall through to GetSize/Read without a successful Open)
bool TextSourceFile_c::LoadFile ( CSphString & sError )
{
	if ( m_sFile.IsEmpty() )
	{
		sError.SetSprintf ( "snippet file name is empty" );
		return false;
	}

	CSphString sFilename;
	sFilename.SetSprintf ( "%s%s", g_sSnippetsFilePrefix.cstr(), m_sFile.scstr() );

	// the (possibly relative) path must not escape the configured prefix dir
	if ( !TestEscaping ( g_sSnippetsFilePrefix, sFilename ) )
	{
		sError.SetSprintf( "File '%s' escapes '%s' scope", sFilename.scstr(), g_sSnippetsFilePrefix.scstr());
		return false;
	}

	CSphAutofile tFile;
	if ( tFile.Open ( sFilename.cstr(), SPH_O_READ, sError )<0 )
		return false;

	// will this ever trigger? time will tell; email me if it does!
	if ( tFile.GetSize()+1>=(SphOffset_t)INT_MAX )
	{
		sError.SetSprintf ( "%s too big for snippet (over 2 GB)", sFilename.cstr() );
		return false;
	}

	auto iFileSize = (int)tFile.GetSize();
	if ( iFileSize<0 )
		return false;

	m_dBuffer.Resize(iFileSize);
	if ( !tFile.Read ( m_dBuffer.Begin(), iFileSize, sError ) )
		return false;

	// the buffer now holds the text; stop pointing at the (empty) original
	m_bUseOriginal = false;
	return true;
}
/// loads the file contents into the buffer, then runs the common filter/stripper pipeline
bool TextSourceFile_c::PrepareText ( ISphFieldFilter * pFilter, const CSphHTMLStripper * pStripper, CSphString & sError )
{
	return LoadFile(sError) && TextSourceString_c::PrepareText ( pFilter, pStripper, sError );
}
//////////////////////////////////////////////////////////////////////////
/// multi-field text source over fields fetched from the index (docstore)
class TextSourceFields_c : public TextSource_i, public StringSourceTraits_c
{
public:
			TextSourceFields_c ( const CSphVector<FieldSource_t> & dAllFields );

	bool	PrepareText ( ISphFieldFilter * pFilter, const CSphHTMLStripper * pStripper, CSphString & sError ) final;
	VecTraits_T<BYTE> GetText ( int iField ) const final;
	int		GetNumFields() const final { return m_dFields.GetLength(); }
	const char * GetFieldName ( int iField ) const final { return m_dFields[iField].m_sName.cstr(); }
	bool	TextFromIndex() const final { return true; }

private:
	const CSphVector<FieldSource_t> &	m_dFields;			// original field data (borrowed)
	CSphVector<CSphVector<BYTE>>		m_dModifiedFields;	// filtered/stripped copies per field
	CSphBitvec							m_tUseOriginal;		// bit per field: original still usable?
};


TextSourceFields_c::TextSourceFields_c ( const CSphVector<FieldSource_t> & dAllFields )
	: m_dFields ( dAllFields )
	, m_dModifiedFields ( dAllFields.GetLength() )
	, m_tUseOriginal ( dAllFields.GetLength() )
{
	// until PrepareText runs, every field is served from its original buffer
	m_tUseOriginal.Set();
}
/// applies filter/stripper per field; clears the per-field bit when a field got modified
bool TextSourceFields_c::PrepareText ( ISphFieldFilter * pFilter, const CSphHTMLStripper * pStripper, CSphString & sError )
{
	ARRAY_FOREACH ( i, m_dFields )
	{
		bool bUseOriginal = true;
		StringSourceTraits_c::PrepareText ( m_dFields[i].m_dData, m_dModifiedFields[i], pFilter, pStripper, bUseOriginal );
		if ( !bUseOriginal )
			m_tUseOriginal.BitClear(i);
	}

	return true;
}

/// returns either the original field buffer or its filtered/stripped copy
VecTraits_T<BYTE> TextSourceFields_c::GetText ( int iField ) const
{
	if ( m_tUseOriginal.BitGet(iField) )
		return m_dFields[iField].m_dData;

	return m_dModifiedFields[iField];
}
//////////////////////////////////////////////////////////////////////////
static std::unique_ptr<TextSource_i> CreateTextSourceFile ( const VecTraits_T<const BYTE> & dFilename )
{
	return std::make_unique<TextSourceFile_c>(dFilename);
}


static std::unique_ptr<TextSource_i> CreateTextSourceString ( const VecTraits_T<const BYTE> & dString )
{
	return std::make_unique<TextSourceString_c>(dString);
}

/// uFilesMode!=0 means pSource holds a file name rather than the text itself
std::unique_ptr<TextSource_i> CreateSnippetSource ( DWORD uFilesMode, const BYTE * pSource, int iLen )
{
	VecTraits_T<const BYTE> dSource ( pSource, iLen );
	return uFilesMode ? CreateTextSourceFile(dSource) : CreateTextSourceString(dSource);
}

/// multi-field source over fields already fetched from the index
std::unique_ptr<TextSource_i> CreateHighlightSource ( const CSphVector<FieldSource_t> & dAllFields )
{
	return std::make_unique<TextSourceFields_c> ( dAllFields );
}
/////////////////////////////////////////////////////////////////////////////
// validate query-level snippet options; currently the only constraint is that
// 'retain' strip mode is incompatible with any non-zero limit
bool SnippetBuilder_c::Impl_c::CheckSettings ( CSphString & sError ) const
{
	assert ( m_pState->m_pQuerySettings );
	const SnippetQuerySettings_t & tOpt = *m_pState->m_pQuerySettings;

	bool bNoLimits = tOpt.m_iLimit==0 && tOpt.m_iLimitPassages==0 && tOpt.m_iLimitWords==0;
	if ( tOpt.m_sStripMode=="retain" && !bNoLimits )
	{
		sError = "html_strip_mode=retain requires that all limits are zero";
		return false;
	}

	return true;
}
// stripper used while preparing the document text: only in 'strip' and 'index' modes
const CSphHTMLStripper * SnippetBuilder_c::Impl_c::GetStripperForText() const
{
	assert( m_pState->m_pQuerySettings);
	if ( m_pState->m_pQuerySettings->m_sStripMode=="strip" || m_pState->m_pQuerySettings->m_sStripMode=="index" )
		return m_pState->m_pStripper.get();

	return nullptr;
}

// stripper used during tokenization: only in 'retain' mode (markup stays in the
// output, but zones/boundaries still need to be detected while tokenizing)
const CSphHTMLStripper * SnippetBuilder_c::Impl_c::GetStripperForTokenization() const
{
	assert( m_pState->m_pQuerySettings);
	if ( m_pState->m_pQuerySettings->m_sStripMode!="retain" )
		return nullptr;

	return m_pState->m_pStripper.get();
}
// bump per-query limits upwards when force_all_words demands more room than the
// user-provided limits allow; emits a warning whenever a limit is raised.
// uFoundTerms is a bitmask of query terms actually present in the document
// (0 = unknown, assume all terms).
void SnippetBuilder_c::Impl_c::FixupQueryLimits ( SnippetLimits_t & tLimit,const SnippetsDocIndex_c & tContainer,
	DWORD uFoundTerms, CSphString & sWarning ) const
{
	assert( m_pState->m_pQuerySettings);
	const SnippetQuerySettings_t & tSettings = *m_pState->m_pQuerySettings;

	if ( tLimit.m_iLimitWords && tSettings.m_bForceAllWords )
	{
		int iNewWordLimit = 0;

		if ( !uFoundTerms )
			iNewWordLimit = tContainer.GetNumTerms();
		else
		{
			// popcount of the found-terms mask (Kernighan's bit-clearing loop)
			DWORD uFound = uFoundTerms;
			for ( ; uFound; iNewWordLimit++ )
				uFound &= uFound-1;
		}

		if ( iNewWordLimit > tLimit.m_iLimitWords )
		{
			sWarning.SetSprintf ( "number of query terms (%d) is greater than the word limit setting (%d): limit increased", iNewWordLimit, tLimit.m_iLimitWords );
			tLimit.m_iLimitWords = iNewWordLimit;
		}
	}

	if ( tLimit.m_iLimit )
	{
		// accumulate codepoint lengths of the terms we have to fit
		int iTotalLen = 0;
		int iMaxLen = 0;
		int nTermsUsed = 0;
		for ( int i = 0; i < tContainer.GetNumTerms(); i++ )
		{
			int iLenCP = tContainer.GetTermWeight(i);
			if ( !uFoundTerms || ( uFoundTerms && ( uFoundTerms & ( 1UL << i ) ) ) )
			{
				iTotalLen += iLenCP;
				iMaxLen = Max ( iMaxLen, iLenCP );
				nTermsUsed++;
			}
		}

		// without force_all_words the limit only has to fit the longest term;
		// with it, all used terms plus one separator between each pair
		int iNewLimit = iMaxLen;
		if ( tSettings.m_bForceAllWords )
			iNewLimit = iTotalLen+nTermsUsed-1;

		if ( iNewLimit > tLimit.m_iLimit )
		{
			sWarning.SetSprintf ( "query length (%d) is greater than the limit setting (%d): limit increased", iNewLimit, tLimit.m_iLimit );
			tLimit.m_iLimit = iNewLimit;
		}
	}
}
// path prefix that file-based snippet sources are confined to
// (presumably set from the snippets_file_prefix config directive; empty = unrestricted — confirm at the config-parsing site)
CSphString g_sSnippetsFilePrefix { "" };

// main entry: validate settings, prepare (filter/strip) the source text, then highlight it into tRes
bool SnippetBuilder_c::Impl_c::Build ( std::unique_ptr<TextSource_i>& pSource, SnippetResult_t & tRes )
{
	assert ( m_pState->m_pIndex && m_pState->m_pQuerySettings );

	if ( !CheckSettings ( tRes.m_sError ) )
		return false;

	assert ( pSource );
	if ( !pSource->PrepareText ( m_pFieldFilter.get(), GetStripperForText(), tRes.m_sError ) )
		return false;

	DoHighlighting ( *pSource, tRes );
	return true;
}
// serialize the requested fields into a structured binary blob
// (the inverse of UnpackSnippetData: field count, then per field its name and
// per-passage separator flags + text). Passages are emitted in weight order.
void SnippetBuilder_c::Impl_c::PackAsData ( MemoryWriter_c & tWriter, SnippetResult_t & tRes,
	const VecTraits_T<int> & dRequestedFields ) const
{
	CSphVector<WeightedPassage_t> dPassageOrder;

	tWriter.ZipInt ( dRequestedFields.GetLength() );

	for ( auto i : dRequestedFields )
	{
		const FieldResult_t & tField = tRes.m_dFields[i];
		tWriter.PutString ( tField.m_sName );

		GetPassageOrder ( tField, dPassageOrder );

		// note: dPassageOrder always has exactly tField.m_dPassages.GetLength() entries
		tWriter.ZipInt ( tField.m_dPassages.GetLength() );
		for ( const auto & tWeighted : dPassageOrder )
		{
			PassageResult_t & tPassage = tField.m_dPassages[tWeighted.m_iId];
			tWriter.PutByte ( tPassage.m_bStartSeparator ? 1 : 0 );
			tWriter.PutByte ( tPassage.m_bEndSeparator ? 1 : 0 );
			tWriter.ZipInt ( tPassage.m_dText.GetLength() );
			tWriter.PutBytes ( tPassage.m_dText.Begin(), tPassage.m_dText.GetLength() );
		}
	}
}
// order passages by descending weight; equal weights keep document order (ascending id)
bool SnippetBuilder_c::Impl_c::WeightedPassageSort_fn::IsLess ( const WeightedPassage_t & a, const WeightedPassage_t & b ) const
{
	if ( a.m_iWeight!=b.m_iWeight )
		return a.m_iWeight>b.m_iWeight;

	return a.m_iId<b.m_iId;
}
void SnippetBuilder_c::Impl_c::GetPassageOrder ( const FieldResult_t & tField, CSphVector<WeightedPassage_t> & dPassageOrder ) const
{
dPassageOrder.Resize ( tField.m_dPassages.GetLength() );
ARRAY_FOREACH ( i, tField.m_dPassages )
{
dPassageOrder[i].m_iId = i;
dPassageOrder[i].m_iWeight = tField.m_dPassages[i].m_iWeight;
}
assert( m_pState->m_pQuerySettings);
if ( m_pState->m_pQuerySettings->m_bWeightOrder )
dPassageOrder.Sort ( WeightedPassageSort_fn() );
}
// concatenate the requested fields' passages into a single plain string,
// inserting chunk separators between passages and field separators between fields
void SnippetBuilder_c::Impl_c::PackAsString ( MemoryWriter_c & tWriter, CSphVector<BYTE> & dRes, SnippetResult_t & tRes,
	const VecTraits_T<int> & dRequestedFields ) const
{
	// fast path: a single field with a single passage and no separators — just move the text out
	if ( tRes.m_dFields.GetLength()==1 && tRes.m_dFields[0].m_dPassages.GetLength()==1 && !tRes.m_dFields[0]
		.m_dPassages[0].m_bStartSeparator && !tRes.m_dFields[0].m_dPassages[0].m_bEndSeparator )
	{
		dRes = std::move ( tRes.m_dFields[0].m_dPassages[0].m_dText );
		return;
	}

	assert( m_pState->m_pQuerySettings);
	const SnippetQuerySettings_t & tOpts = *m_pState->m_pQuerySettings;
	CSphVector<WeightedPassage_t> dPassageOrder;

	int iChunkSeparatorLen = tOpts.m_sChunkSeparator.Length();
	int iFieldSeparatorLen = tOpts.m_sFieldSeparator.Length();

	ARRAY_FOREACH ( i, dRequestedFields )
	{
		// emit a field separator before this field, but only if something was
		// already written AND at least one of the remaining fields has passages
		// (avoids a dangling trailing separator)
		if ( dRes.GetLength() )
		{
			bool bHasMoreData = false;
			for ( int j = i; j < dRequestedFields.GetLength(); j++ )
			{
				const FieldResult_t & tNextField = tRes.m_dFields[dRequestedFields[j]];
				if ( tNextField.m_dPassages.GetLength() )
				{
					bHasMoreData = true;
					break;
				}
			}

			if ( bHasMoreData )
				tWriter.PutBytes ( m_pState->m_pQuerySettings->m_sFieldSeparator.cstr(), iFieldSeparatorLen );
		}

		const FieldResult_t & tField = tRes.m_dFields[dRequestedFields[i]];
		GetPassageOrder ( tField, dPassageOrder );

		ARRAY_FOREACH ( iWeighted, dPassageOrder )
		{
			PassageResult_t & tPassage = tField.m_dPassages[dPassageOrder[iWeighted].m_iId];
			// leading chunk separator only before the very first passage (when requested)
			if ( !iWeighted && tPassage.m_bStartSeparator )
				tWriter.PutBytes ( tOpts.m_sChunkSeparator.cstr(), iChunkSeparatorLen );

			tWriter.PutBytes ( tPassage.m_dText.Begin(), tPassage.m_dText.GetLength() );

			// chunk separator between passages, and a trailing one if the last passage asks for it
			if ( iWeighted<dPassageOrder.GetLength()-1 || tPassage.m_bEndSeparator )
				tWriter.PutBytes ( tOpts.m_sChunkSeparator.cstr(), iChunkSeparatorLen );
		}
	}
}
// produce the final wire/result blob: drop empty passages first, then pack
// either as a structured field blob (pack_fields) or as one concatenated string
CSphVector<BYTE> SnippetBuilder_c::Impl_c::PackResult ( SnippetResult_t & tRes, const VecTraits_T<int> & dRequestedFields ) const
{
	assert( m_pState->m_pQuerySettings);

	// 1st pass: remove redundant empty passages (if any)
	for ( auto & tField : tRes.m_dFields )
	{
		bool bSingleEmpty = tField.m_dPassages.GetLength()==1 && !tField.m_dPassages[0].m_dText.GetLength();
		if ( bSingleEmpty )
			tField.m_dPassages.Resize(0);
	}

	CSphVector<BYTE> dPacked;
	MemoryWriter_c tWriter(dPacked);

	if ( m_pState->m_pQuerySettings->m_bPackFields )
		PackAsData ( tWriter, tRes, dRequestedFields );
	else
		PackAsString ( tWriter, dPacked, tRes, dRequestedFields );

	return dPacked;
}
// walk the parsed query tree and collect which SPZ (sentence/paragraph/zone)
// operators it uses, as a bitmask of SPH_SPZ_* flags
static DWORD CollectQuerySPZ ( const XQNode_t * pNode )
{
	if ( !pNode )
		return SPH_SPZ_NONE;

	DWORD uSPZ = SPH_SPZ_NONE;
	switch ( pNode->GetOp() )
	{
	case SPH_QUERY_SENTENCE:	uSPZ |= SPH_SPZ_SENTENCE; break;
	case SPH_QUERY_PARAGRAPH:	uSPZ |= SPH_SPZ_PARAGRAPH; break;
	default:					break;
	}

	for ( const XQNode_t * pChild : pNode->m_dChildren )
		uSPZ |= CollectQuerySPZ(pChild);

	return uSPZ;
}
// (re)create the HTML stripper and configure sentence/paragraph/zone support
// on the tokenizer, according to the query's strip mode and whether SPZ
// processing is needed at all
bool SnippetBuilder_c::Impl_c::SetupStripperSPZ ( bool bSetupSPZ, CSphString & sError )
{
	m_pState->m_pStripper.reset();

	// SPZ requires the tokenizer to detect sentence and zone boundaries
	if ( bSetupSPZ && ( !m_pTokenizer->EnableSentenceIndexing(sError) || !m_pTokenizer->EnableZoneIndexing(sError) ) )
		return false;

	assert ( m_pState->m_pIndex && m_pState->m_pQuerySettings );
	const CSphIndexSettings & tIndexSettings = m_pState->m_pIndex->GetSettings();
	const SnippetQuerySettings_t & q = *m_pState->m_pQuerySettings;

	if ( q.m_sStripMode=="strip" || q.m_sStripMode=="retain" || ( q.m_sStripMode=="index" && tIndexSettings.m_bHtmlStrip ) )
	{
		// don't strip HTML markup in 'retain' mode - proceed zones only
		m_pState->m_pStripper = std::make_unique<CSphHTMLStripper> ( q.m_sStripMode!="retain" );

		// 'index' mode reuses the stripper configuration the index was built with
		if ( q.m_sStripMode=="index" )
		{
			if ( !m_pState->m_pStripper->SetIndexedAttrs ( tIndexSettings.m_sHtmlIndexAttrs.cstr (), sError ) ||
				!m_pState->m_pStripper->SetRemovedElements ( tIndexSettings.m_sHtmlRemoveElements.cstr (), sError ) )
			{
				sError.SetSprintf ( "HTML stripper config error: %s", sError.cstr () );
				return false;
			}
		}

		if ( bSetupSPZ )
			m_pState->m_pStripper->EnableParagraphs();

		// handle zone(s) in special mode only when passage_boundary enabled
		if ( bSetupSPZ && !m_pState->m_pStripper->SetZones ( tIndexSettings.m_sZones.cstr (), sError ) )
		{
			sError.SetSprintf ( "HTML stripper config error: %s", sError.cstr () );
			return false;
		}
	}

	return true;
}
// one-time binding of the builder to an index + query settings: clones the
// index's tokenizers/dictionary/field filter and picks a query parser.
// Must not be called on clones (they share m_pState with the original).
void SnippetBuilder_c::Impl_c::Setup ( const CSphIndex * pIndex, const SnippetQuerySettings_t & tSettings )
{
	assert(pIndex);
	assert(!m_pState->m_pQuerySettings);
	assert (!m_pState->m_bSetupCalled && "Should not be called for clone");

	m_pState->m_pIndex = pIndex;
	m_pState->m_pQuerySettings = &tSettings;
	m_pDict = GetStatelessDict ( pIndex->GetDictionary () );

	const CSphIndexSettings & tIndexSettings = m_pState->m_pIndex->GetSettings();

	// OPTIMIZE! do a lightweight indexing clone here
	m_pTokenizer = pIndex->GetTokenizer()->Clone ( SPH_CLONE_INDEX );

	// lemmatizer filter, if the index uses AOT morphology
	if ( tIndexSettings.m_uAotFilterMask )
		sphAotTransformFilter ( m_pTokenizer, m_pDict, tIndexSettings.m_bIndexExactWords, tIndexSettings.m_uAotFilterMask );

	m_pQueryTokenizer = pIndex->GetQueryTokenizer()->Clone ( SPH_CLONE );

	// setup exact dictionary if needed
	if ( tIndexSettings.m_bIndexExactWords )
		SetupExactDict ( m_pDict );

	if ( tSettings.m_bJsonQuery )
	{
		bool bWordDict = m_pDict->GetSettings().m_bWordDict;
		// caveat: here we clone from Tokenizer, not from QueryTokenizer, as last was cloned as non-json, and so, includes different extra symbols.
		m_pState->m_pTokenizerJson = sphCloneAndSetupQueryTokenizer ( pIndex->GetTokenizer(), pIndex->IsStarDict ( bWordDict ), tIndexSettings.m_bIndexExactWords, true );
		m_pState->m_pQueryParser = sphCreateJsonQueryParser();
	}
	else
		m_pState->m_pQueryParser = sphCreatePlainQueryParser();

	if ( pIndex->GetFieldFilter() )
		m_pFieldFilter = pIndex->GetFieldFilter()->Clone();

	// adjust tokenizer for markup-retaining mode: '<' must become a special so zones are still seen
	if ( tSettings.m_sStripMode=="retain" )
		m_pTokenizer->AddSpecials ( "<" );

	m_pState->m_bSetupCalled = true;
}
// parse the highlighting query, collect its SPZ requirements and configure the
// stripper accordingly; bIgnoreFields clears per-field masks so all fields match
bool SnippetBuilder_c::Impl_c::SetQuery ( const CSphString & sQuery, bool bIgnoreFields, CSphString & sError )
{
	assert( m_pState->m_pIndex);
	assert( m_pState->m_pQuerySettings);

	// run the index's field filter (e.g. regexp filter) over the query text too
	CSphVector<BYTE> dFiltered;
	const BYTE * szModifiedQuery = (const BYTE *)sQuery.cstr();
	if ( m_pFieldFilter && szModifiedQuery && m_pFieldFilter->Apply ( szModifiedQuery, dFiltered, true ) )
		szModifiedQuery = dFiltered.Begin();

	m_pState->m_pExtQuery = std::make_unique<XQQuery_t>();

	const CSphIndexSettings & tIndexSettings = m_pState->m_pIndex->GetSettings();

	// OPTIMIZE? double lightweight clone here? but then again it's lightweight
	if ( !m_pState->m_pQueryParser->ParseQuery ( *m_pState->m_pExtQuery, (const char*)szModifiedQuery, nullptr,
		m_pQueryTokenizer, m_pState->m_pTokenizerJson, &m_pState->m_pIndex->GetMatchSchema(), m_pDict, tIndexSettings, &m_pState->m_pIndex->GetMorphFields() ) )
	{
		sError = m_pState->m_pExtQuery->m_sParseError;
		return false;
	}

	if ( bIgnoreFields && m_pState->m_pExtQuery->m_pRoot )
		m_pState->m_pExtQuery->m_pRoot->ClearFieldMask();

	// figure out which SPZ modes the query itself demands
	m_pState->m_eExtQuerySPZ = SPH_SPZ_NONE;
	m_pState->m_eExtQuerySPZ |= CollectQuerySPZ ( m_pState->m_pExtQuery->m_pRoot );
	if ( m_pState->m_pExtQuery->m_dZones.GetLength () )
		m_pState->m_eExtQuerySPZ |= SPH_SPZ_ZONE;

	TransformAotFilter ( m_pState->m_pExtQuery->m_pRoot, m_pDict->GetWordforms(), tIndexSettings );

	// SPZ machinery is needed if either the settings or the query use it, or in 'retain' mode
	bool bSetupSPZ = m_pState->m_pQuerySettings->m_ePassageSPZ!=SPH_SPZ_NONE
		|| m_pState->m_eExtQuerySPZ!=SPH_SPZ_NONE
		|| m_pState->m_pQuerySettings->m_sStripMode=="retain";

	return SetupStripperSPZ ( bSetupSPZ, sError );
}
// check whether filepath from sPath does not escape area of sPrefix
// NOTE(review): this is a plain string-prefix check on the normalized path, so a
// prefix of "/data" would also accept "/database/..." — confirm whether sPrefix
// is guaranteed to end with a path separator at the call sites. Also note that
// sPrefix itself is not normalized here, only sPath is.
bool TestEscaping( const CSphString& sPrefix, const CSphString& sPath )
{
	// empty prefix = no restriction; exact match is trivially inside
	if ( sPrefix.IsEmpty() || sPrefix==sPath )
		return true;
	auto sNormalized = sphNormalizePath( sPath );
	return sPrefix==sNormalized.SubString( 0, sPrefix.Length());
}
// map a passage_boundary option string to its ESphSpz value;
// empty or unrecognized input maps to SPH_SPZ_NONE
ESphSpz GetPassageBoundary ( const CSphString & sPassageBoundaryMode )
{
	if ( sPassageBoundaryMode=="sentence" )
		return SPH_SPZ_SENTENCE;
	if ( sPassageBoundaryMode=="paragraph" )
		return SPH_SPZ_PARAGRAPH;
	if ( sPassageBoundaryMode=="zone" )
		return SPH_SPZ_ZONE;

	return SPH_SPZ_NONE;
}
// inverse of GetPassageBoundary: ESphSpz value back to its option string
// (empty string for SPH_SPZ_NONE and anything unrecognized)
const char * PassageBoundarySz ( ESphSpz eBoundary )
{
	if ( eBoundary==SPH_SPZ_SENTENCE )
		return "sentence";
	if ( eBoundary==SPH_SPZ_PARAGRAPH )
		return "paragraph";
	if ( eBoundary==SPH_SPZ_ZONE )
		return "zone";

	return "";
}
// validate mutually-exclusive snippet option combinations around SPZ boundaries.
// NOTE(review): conditions test q.m_ePassageSPZ while the messages print
// PassageBoundarySz(eMode) — presumably callers always pass eMode equal to
// q.m_ePassageSPZ; confirm at the call sites.
bool sphCheckOptionsSPZ ( const SnippetQuerySettings_t & q, ESphSpz eMode, CSphString & sError )
{
	if ( q.m_ePassageSPZ )
	{
		// SPZ passages are delimited by boundaries, so around=0 / use_boundaries make no sense
		if ( q.m_iAround==0 )
		{
			sError.SetSprintf ( "invalid combination of snippet_boundary=%s and around=%d", PassageBoundarySz(eMode), q.m_iAround );
			return false;
		} else if ( q.m_bUseBoundaries )
		{
			sError.SetSprintf ( "invalid combination of snippet_boundary=%s and use_boundaries", PassageBoundarySz(eMode) );
			return false;
		}
	}

	if ( q.m_bEmitZones )
	{
		// emit_zones needs zone boundaries and actual markup stripping
		if ( q.m_ePassageSPZ!=SPH_SPZ_ZONE )
		{
			sError.SetSprintf ( "invalid combination of snippet_boundary=%s and emit_zones", PassageBoundarySz(eMode) );
			return false;
		}
		if ( !( q.m_sStripMode=="strip" || q.m_sStripMode=="index" ) )
		{
			sError.SetSprintf ( "invalid combination of strip=%s and emit_zones", q.m_sStripMode.cstr() );
			return false;
		}
	}

	return true;
}
// deserialize a blob produced by PackAsData back into a SnippetResult_t;
// an empty blob yields an empty result
SnippetResult_t UnpackSnippetData ( ByteBlob_t dData )
{
	SnippetResult_t tRes;
	if ( IsEmpty ( dData ) )
		return tRes;

	MemoryReader_c tReader ( dData );

	int iFields = tReader.UnzipInt();
	tRes.m_dFields.Resize(iFields);
	for ( auto & tField : tRes.m_dFields )
	{
		tField.m_sName = tReader.GetString();

		int iPassages = tReader.UnzipInt();
		tField.m_dPassages.Resize(iPassages);
		for ( auto & tPassage : tField.m_dPassages )
		{
			tPassage.m_bStartSeparator = tReader.GetVal<BYTE>()!=0;
			tPassage.m_bEndSeparator = tReader.GetVal<BYTE>()!=0;

			int iTextLen = tReader.UnzipInt();
			tPassage.m_dText.Resize(iTextLen);
			tReader.GetBytes ( tPassage.m_dText.Begin(), iTextLen );
		}
	}

	return tRes;
}
SnippetBuilder_c::Impl_c::Impl_c ()
	: m_pState { new SnippetBuilderStatelessMembers_t }
{}

// clone ctor: shares the stateless members block, but clones the stateful
// per-thread pieces (tokenizers, dictionary, field filter)
SnippetBuilder_c::Impl_c::Impl_c ( const SnippetBuilder_c::Impl_c & rhs )
	: m_pState { rhs.m_pState }
{
	m_pTokenizer = rhs.m_pTokenizer->Clone ( SPH_CLONE );
	m_pQueryTokenizer = rhs.m_pQueryTokenizer->Clone ( SPH_CLONE );
	m_pDict = GetStatelessDict ( rhs.m_pDict );

	if ( rhs.m_pFieldFilter )
		m_pFieldFilter = rhs.m_pFieldFilter->Clone ();
}

SnippetBuilder_c::Impl_c * SnippetBuilder_c::Impl_c::MakeClone () const
{
	return new SnippetBuilder_c::Impl_c ( *this );
}

// ----- SnippetBuilder_c: thin pimpl wrappers over Impl_c -----

SnippetBuilder_c::SnippetBuilder_c ( const SnippetBuilder_c & rhs )
{
	m_pImpl = rhs.m_pImpl->MakeClone();
}

SnippetBuilder_c::SnippetBuilder_c()
{
	m_pImpl = new SnippetBuilder_c::Impl_c;
}

SnippetBuilder_c* SnippetBuilder_c::MakeClone() const
{
	assert ( m_pImpl );
	return new SnippetBuilder_c ( *this );
}

SnippetBuilder_c::~SnippetBuilder_c()
{
	SafeDelete ( m_pImpl );
}

void SnippetBuilder_c::Setup ( const CSphIndex * pIndex, const SnippetQuerySettings_t & tQuery )
{
	assert ( m_pImpl );
	m_pImpl->Setup ( pIndex, tQuery );
}

bool SnippetBuilder_c::SetQuery ( const CSphString & sQuery, bool bIgnoreFields, CSphString & sError )
{
	assert ( m_pImpl );
	return m_pImpl->SetQuery ( sQuery, bIgnoreFields, sError );
}

bool SnippetBuilder_c::Build ( std::unique_ptr<TextSource_i>& pSource, SnippetResult_t & tRes )
{
	assert ( m_pImpl );
	return m_pImpl->Build ( pSource, tRes );
}

CSphVector<BYTE> SnippetBuilder_c::PackResult ( SnippetResult_t & tRes, const VecTraits_T<int> & dRequestedFields ) const
{
	assert ( m_pImpl );
	return m_pImpl->PackResult ( tRes, dRequestedFields );
}
| 52,670
|
C++
|
.cpp
| 1,256
| 39.34793
| 226
| 0.717523
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,933
|
columnarrt.cpp
|
manticoresoftware_manticoresearch/src/columnarrt.cpp
|
//
// Copyright (c) 2021-2024, Manticore Software LTD (https://manticoresearch.com)
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "columnarrt.h"
#include "fileio.h"
#include "memio.h"
#include "attribute.h"
#include "schema/schema.h"
#include "columnarmisc.h"
template <typename T>
static std::pair<T,T> GetLengthOffset ( const CSphVector<T> & dLengths, RowID_t tRowID )
{
T iLength = dLengths[tRowID];
T iOffset = 0;
if ( tRowID>0 )
{
iOffset = dLengths[tRowID-1];
iLength -= iOffset;
}
return {iLength, iOffset};
}
template <typename T>
static T GetLength ( const CSphVector<T> & dLengths, RowID_t tRowID )
{
T iLength = dLengths[tRowID];
if ( tRowID>0 )
iLength -= dLengths[tRowID-1];
return iLength;
}
/////////////////////////////////////////////////////////////////////
/// in-memory (RT segment) columnar attribute storage: append per-document
/// values, (de)serialize, report memory use and expose a read iterator
class ColumnarAttrRT_i
{
public:
	virtual			~ColumnarAttrRT_i() = default;

	// exactly one AddDoc overload is meaningful per concrete type:
	// scalar, blob (strings), or value array (MVA/float vectors)
	virtual void	AddDoc ( SphAttr_t tAttr ) = 0;
	virtual void	AddDoc ( const BYTE * pData, int iLength ) = 0;
	virtual void	AddDoc ( const int64_t * pData, int iLength ) = 0;

	virtual void	Save ( MemoryWriter_c & tWriter ) const = 0;
	virtual void	Save ( Writer_i & tWriter ) const = 0;
	virtual void	Load ( MemoryReader_c & tReader ) = 0;
	virtual void	Load ( CSphReader & tReader ) = 0;

	virtual int64_t	AllocatedBytes() const = 0;
	virtual std::unique_ptr<columnar::Iterator_i> CreateIterator() const = 0;
	virtual common::AttrType_e	GetType() const = 0;
};

/// common base: keeps the attribute type; AddDoc overloads assert so that a
/// subclass only needs to override the one matching its storage
class ColumnarAttrRT_c : public ColumnarAttrRT_i
{
public:
			ColumnarAttrRT_c ( ESphAttr eType ) : m_eType(eType) {}

	void	AddDoc ( SphAttr_t tAttr ) override						{ assert ( 0 && "Unsupported type" ); }
	void	AddDoc ( const BYTE * pData, int iLength ) override		{ assert ( 0 && "Unsupported type" ); }
	void	AddDoc ( const int64_t * pData, int iLength ) override	{ assert ( 0 && "Unsupported type" ); }

	common::AttrType_e	GetType() const override					{ return ToColumnarType ( m_eType, ROWITEM_BITS ); }

protected:
	ESphAttr	m_eType = SPH_ATTR_NONE;
};

/// iterator base: every accessor asserts; concrete iterators override only
/// the accessors applicable to their value kind
class ColumnarIterator_RT_c : public columnar::Iterator_i
{
public:
	int64_t		Get ( uint32_t tRowID ) override											{ assert ( 0 && "Unsupported function" ); return 0; }
	void		Fetch ( const util::Span_T<uint32_t> & dRowIDs, util::Span_T<int64_t> & dValues ) override { assert ( 0 && "Unsupported function" ); }
	int			Get ( uint32_t tRowID, const uint8_t * & pData ) override					{ assert ( 0 && "Unsupported function" ); return 0; }
	uint8_t *	GetPacked ( uint32_t tRowID ) override										{ assert ( 0 && "Unsupported function" ); return 0; }
	int			GetLength ( uint32_t tRowID ) override										{ assert ( 0 && "Unsupported function" ); return 0; }
	void		AddDesc ( std::vector<common::IteratorDesc_t> & dDesc ) const override		{}
};
/////////////////////////////////////////////////////////////////////
/// read iterator over a plain in-memory array of integer values
template<typename T>
class ColumnarIterator_Int_T : public ColumnarIterator_RT_c
{
public:
			ColumnarIterator_Int_T ( const CSphVector<T> & dValues ) : m_dValues ( dValues ) {}

	int64_t	Get ( uint32_t tRowID ) override { return m_dValues[tRowID]; }
	void	Fetch ( const util::Span_T<uint32_t> & dRowIDs, util::Span_T<int64_t> & dValues ) override;

private:
	const CSphVector<T> & m_dValues;	// borrowed; owning attribute must outlive the iterator
};

// bulk fetch: dValues must have room for dRowIDs.size() entries
template<typename T>
void ColumnarIterator_Int_T<T>::Fetch ( const util::Span_T<uint32_t> & dRowIDs, util::Span_T<int64_t> & dValues )
{
	uint32_t * pRowID = dRowIDs.begin();
	uint32_t * pRowIDEnd = dRowIDs.end();
	int64_t * pValue = dValues.begin();
	while ( pRowID<pRowIDEnd )
		*pValue++ = m_dValues[*pRowID++];
}
/////////////////////////////////////////////////////////////////////
/// integer columnar attribute with a configurable bit width: values are
/// masked down to iBits bits on append
template<typename T>
class ColumnarAttr_Int_T : public ColumnarAttrRT_c
{
public:
			ColumnarAttr_Int_T ( ESphAttr eType, int iBits );

	void	AddDoc ( SphAttr_t tAttr ) override		{ m_dValues.Add ( ( (T)tAttr ) & m_uMask ); }

	void	Save ( MemoryWriter_c & tWriter ) const override	{ SaveData(tWriter); }
	void	Save ( Writer_i & tWriter ) const override			{ SaveData(tWriter); }
	void	Load ( MemoryReader_c & tReader ) override			{ LoadData(tReader); }
	void	Load ( CSphReader & tReader ) override				{ LoadData(tReader); }

	int64_t	AllocatedBytes() const override			{ return m_dValues.GetLengthBytes64(); }

	std::unique_ptr<columnar::Iterator_i> CreateIterator() const override { return std::make_unique<ColumnarIterator_Int_T<T>> ( m_dValues ); }

protected:
	CSphVector<T> m_dValues;

private:
	T	m_uMask = 0;	// keeps only the attribute's declared bit width

	template <typename WRITER>
	void SaveData ( WRITER & tWriter ) const;

	template <typename READER>
	void LoadData ( READER & tReader );
};

template<typename T>
ColumnarAttr_Int_T<T>::ColumnarAttr_Int_T ( ESphAttr eType, int iBits )
	: ColumnarAttrRT_c(eType)
	, m_uMask ( iBits==64 ? (T)0xFFFFFFFFFFFFFFFFULL : (T)( (1ULL<<iBits)-1 ) )	// 1<<64 is UB, special-case full width
{}

// layout: type dword, mask, value count, raw value array.
// note: LoadData does NOT read the type dword — the caller consumes it first
// to pick the concrete class (see CreateColumnarAttrRT usage in Load paths)
template<typename T>
template <typename WRITER>
void ColumnarAttr_Int_T<T>::SaveData ( WRITER & tWriter ) const
{
	tWriter.PutDword ( m_eType );
	tWriter.PutOffset ( m_uMask );
	tWriter.PutDword ( m_dValues.GetLength() );
	tWriter.PutBytes ( m_dValues.Begin(), (int)m_dValues.GetLengthBytes64() );
}

template<typename T>
template <typename READER>
void ColumnarAttr_Int_T<T>::LoadData ( READER & tReader )
{
	m_uMask = (T)tReader.GetOffset();
	m_dValues.Resize ( tReader.GetDword() );
	tReader.GetBytes ( m_dValues.Begin(), (int)m_dValues.GetLengthBytes64() );
}
/////////////////////////////////////////////////////////////////////
/// boolean columnar attribute: 1-bit int storage, values normalized to 0/1 on append
class ColumnarAttr_Bool_c : public ColumnarAttr_Int_T<BYTE>
{
	using BASE = ColumnarAttr_Int_T<BYTE>;

public:
			ColumnarAttr_Bool_c() : BASE ( SPH_ATTR_BOOL, 1 ) {}

	void	AddDoc ( SphAttr_t tAttr ) override	{ BASE::m_dValues.Add ( tAttr ? 1 : 0 ); }
};
/////////////////////////////////////////////////////////////////////
/// read iterator over string storage: one shared data blob plus a cumulative
/// per-row length array (see GetLengthOffset)
class ColumnarIterator_String_c : public ColumnarIterator_RT_c
{
public:
			ColumnarIterator_String_c ( const CSphVector<BYTE> & dData, const CSphVector<int64_t> & dLengths ) : m_dData ( dData ), m_dLengths ( dLengths ) {}

	int			Get ( uint32_t tRowID, const uint8_t * & pData ) override;
	uint8_t *	GetPacked ( uint32_t tRowID ) override;
	int			GetLength ( uint32_t tRowID ) override { return (int)::GetLength ( m_dLengths, tRowID ); }

private:
	const CSphVector<BYTE> &	m_dData;	// borrowed blob of all strings back to back
	const CSphVector<int64_t> &	m_dLengths;	// borrowed cumulative lengths, one entry per row
};

// zero-copy access: pData points into the shared blob (nullptr for empty strings)
int ColumnarIterator_String_c::Get ( uint32_t tRowID, const uint8_t * & pData )
{
	int64_t iLength, iOffset;
	std::tie(iLength, iOffset) = GetLengthOffset ( m_dLengths, tRowID );
	pData = iLength>0 ? (const uint8_t*)&m_dData[iOffset] : nullptr;
	return (int)iLength;
}

// returns a freshly allocated length-prefixed ("packed ptr attr") copy; caller owns it
uint8_t * ColumnarIterator_String_c::GetPacked ( uint32_t tRowID )
{
	int64_t iLength, iOffset;
	std::tie(iLength, iOffset) = GetLengthOffset ( m_dLengths, tRowID );
	auto pStr = iLength>0 ? (const uint8_t*)&m_dData[iOffset] : nullptr;
	return sphPackPtrAttr ( { pStr, iLength } );
}
/// string columnar attribute: strings appended back to back into one blob,
/// row boundaries tracked as cumulative byte lengths
class ColumnarAttr_String_c : public ColumnarAttrRT_c
{
public:
			ColumnarAttr_String_c() : ColumnarAttrRT_c ( SPH_ATTR_STRING ) {}

	void	AddDoc ( const BYTE * pData, int iLength ) override;

	void	Save ( MemoryWriter_c & tWriter ) const override	{ SaveData(tWriter); }
	void	Save ( Writer_i & tWriter ) const override			{ SaveData(tWriter); }
	void	Load ( MemoryReader_c & tReader ) override			{ LoadData(tReader); }
	void	Load ( CSphReader & tReader ) override				{ LoadData(tReader); }

	int64_t	AllocatedBytes() const override		{ return m_dData.GetLengthBytes64() + m_dLengths.GetLengthBytes64(); }

	std::unique_ptr<columnar::Iterator_i> CreateIterator() const override { return std::make_unique<ColumnarIterator_String_c> ( m_dData, m_dLengths ); }

private:
	CSphVector<BYTE>	m_dData;			// all strings, concatenated
	CSphVector<int64_t>	m_dLengths;			// cumulative lengths; entry i = total bytes after row i
	int64_t				m_iTotalLength = 0;	// running total, mirrors m_dLengths.Last()

	template <typename WRITER>
	void SaveData ( WRITER & tWriter ) const;

	template <typename READER>
	void LoadData ( READER & tReader );
};

void ColumnarAttr_String_c::AddDoc ( const BYTE * pData, int iLength )
{
	m_iTotalLength += iLength;
	m_dLengths.Add(m_iTotalLength);
	m_dData.Append ( pData, iLength );
}

// layout: type dword, row count + cumulative lengths, blob size + blob.
// the type dword is consumed by the caller on load (class selection), so
// LoadData starts at the lengths
template <typename WRITER>
void ColumnarAttr_String_c::SaveData ( WRITER & tWriter ) const
{
	tWriter.PutDword ( m_eType );
	tWriter.PutDword ( m_dLengths.GetLength() );
	tWriter.PutBytes ( m_dLengths.Begin(), (int)m_dLengths.GetLengthBytes64() );
	tWriter.PutDword ( m_dData.GetLength() );
	tWriter.PutBytes ( m_dData.Begin(), (int)m_dData.GetLengthBytes64() );
}

// note: m_iTotalLength is not restored here; presumably loaded attributes are
// read-only (no further AddDoc) — confirm before mixing Load with AddDoc
template <typename READER>
void ColumnarAttr_String_c::LoadData ( READER & tReader )
{
	m_dLengths.Resize ( tReader.GetDword() );
	tReader.GetBytes ( m_dLengths.Begin(), (int)m_dLengths.GetLengthBytes64() );
	m_dData.Resize ( tReader.GetDword() );
	tReader.GetBytes ( m_dData.Begin(), (int)m_dData.GetLengthBytes64() );
}
/////////////////////////////////////////////////////////////////////
/// read iterator over MVA-style storage: one shared value array plus cumulative
/// per-row counts; byte lengths are counts scaled by sizeof(T)
template <typename T>
class ColumnarIterator_MVA_T : public ColumnarIterator_RT_c
{
public:
			ColumnarIterator_MVA_T ( const CSphVector<T> & dData, const CSphVector<int> & dLengths ) : m_dData ( dData ), m_dLengths ( dLengths ) {}

	int			Get ( uint32_t tRowID, const uint8_t * & pData ) override;
	uint8_t *	GetPacked ( uint32_t tRowID ) override;
	int			GetLength ( uint32_t tRowID ) override { return ::GetLength ( m_dLengths, tRowID )*sizeof(T); }

private:
	const CSphVector<T> &	m_dData;	// borrowed flat array of all rows' values
	const CSphVector<int> &	m_dLengths;	// borrowed cumulative value counts (NOT bytes)
};

// zero-copy access: pData points into the shared array; length returned in bytes
template <typename T>
int ColumnarIterator_MVA_T<T>::Get ( uint32_t tRowID, const uint8_t * & pData )
{
	int iLength, iOffset;
	std::tie(iLength, iOffset) = GetLengthOffset ( m_dLengths, tRowID );
	iLength *= sizeof(T);
	pData = iLength > 0 ? (const uint8_t*)&m_dData[iOffset] : nullptr;
	return iLength;
}

// length-prefixed allocated copy; caller owns the returned buffer
template <typename T>
uint8_t * ColumnarIterator_MVA_T<T>::GetPacked ( uint32_t tRowID )
{
	int iLength, iOffset;
	std::tie(iLength, iOffset) = GetLengthOffset ( m_dLengths, tRowID );
	iLength *= sizeof(T);
	auto pMVA = iLength > 0 ? (const uint8_t*)&m_dData[iOffset] : nullptr;
	return sphPackPtrAttr ( { pMVA, iLength } );
}
/// MVA-style columnar attribute (multi-value int sets, float vectors): values
/// flattened into one array, row boundaries tracked as cumulative value counts
template <typename T>
class ColumnarAttr_MVA_T : public ColumnarAttrRT_c
{
public:
			ColumnarAttr_MVA_T ( ESphAttr eType ) : ColumnarAttrRT_c ( eType ) {}

	void	AddDoc ( const int64_t * pData, int iLength ) override;

	void	Save ( MemoryWriter_c & tWriter ) const override	{ SaveData(tWriter); }
	void	Save ( Writer_i & tWriter ) const override			{ SaveData(tWriter); }

	int64_t	AllocatedBytes() const override		{ return m_dData.GetLengthBytes64() + m_dLengths.GetLengthBytes64(); }

	void	Load ( MemoryReader_c & tReader ) override			{ LoadData(tReader); }
	void	Load ( CSphReader & tReader ) override				{ LoadData(tReader); }

	std::unique_ptr<columnar::Iterator_i> CreateIterator() const override { return std::make_unique<ColumnarIterator_MVA_T<T>> ( m_dData, m_dLengths ); }

private:
	CSphVector<T>	m_dData;			// all rows' values, flattened
	CSphVector<int>	m_dLengths;			// cumulative value counts; entry i = total values after row i
	int64_t			m_iTotalLength = 0;	// running total of values appended

	template <typename WRITER>
	void SaveData ( WRITER & tWriter ) const;

	template <typename READER>
	void LoadData ( READER & tReader );
};

// values always arrive widened to int64_t and are narrowed to T on store
template <typename T>
void ColumnarAttr_MVA_T<T>::AddDoc ( const int64_t * pData, int iLength )
{
	m_iTotalLength += iLength;
	m_dLengths.Add ( (int)m_iTotalLength );

	for ( int i = 0; i < iLength; i++ )
		m_dData.Add ( (T)pData[i] );
}

// layout mirrors ColumnarAttr_String_c: type dword (consumed by the caller on
// load), cumulative counts, then the flat value array
template <typename T>
template <typename WRITER>
void ColumnarAttr_MVA_T<T>::SaveData ( WRITER & tWriter ) const
{
	tWriter.PutDword(m_eType);
	tWriter.PutDword ( m_dLengths.GetLength() );
	tWriter.PutBytes ( m_dLengths.Begin(), (int)m_dLengths.GetLengthBytes64() );
	tWriter.PutDword ( m_dData.GetLength() );
	tWriter.PutBytes ( m_dData.Begin(), (int)m_dData.GetLengthBytes64() );
}

template <typename T>
template <typename READER>
void ColumnarAttr_MVA_T<T>::LoadData ( READER & tReader )
{
	m_dLengths.Resize ( tReader.GetDword() );
	tReader.GetBytes ( m_dLengths.Begin(), (int)m_dLengths.GetLengthBytes64() );
	m_dData.Resize ( tReader.GetDword() );
	tReader.GetBytes ( m_dData.Begin(), (int)m_dData.GetLengthBytes64() );
}
////////////////////////////////////////////////////////////////////
// factory mapping an attribute type to its RT columnar storage implementation;
// iBits limits integer width (0 is fine for Load paths, as Load overwrites the mask)
static std::unique_ptr<ColumnarAttrRT_i> CreateColumnarAttrRT ( ESphAttr eType, int iBits )
{
	switch ( eType )
	{
	case SPH_ATTR_INTEGER:
	case SPH_ATTR_TIMESTAMP:
	case SPH_ATTR_FLOAT:
		return std::make_unique<ColumnarAttr_Int_T<DWORD>> ( eType, iBits );

	case SPH_ATTR_BOOL:		return std::make_unique<ColumnarAttr_Bool_c>();
	case SPH_ATTR_BIGINT:	return std::make_unique<ColumnarAttr_Int_T<int64_t>> ( eType, iBits );
	case SPH_ATTR_STRING:	return std::make_unique<ColumnarAttr_String_c>();
	case SPH_ATTR_UINT32SET:return std::make_unique<ColumnarAttr_MVA_T<DWORD>>(eType);
	case SPH_ATTR_INT64SET:	return std::make_unique<ColumnarAttr_MVA_T<int64_t>>(eType);
	// float vectors reuse 32-bit MVA storage; presumably the int64 values fed to
	// AddDoc carry raw float bit patterns — confirm against the writer side
	case SPH_ATTR_FLOAT_VECTOR:	return std::make_unique<ColumnarAttr_MVA_T<uint32_t>>(eType);

	default:
		assert ( 0 && "Unsupported type" );
		return nullptr;
	}
}
/////////////////////////////////////////////////////////////////////
/// builder collecting per-document values for all columnar attributes of a
/// schema; attribute indexes are positions among the schema's columnar attrs
class ColumnarBuilderRT_c : public ColumnarBuilderRT_i
{
public:
	explicit ColumnarBuilderRT_c ( const CSphSchema & tSchema );
	explicit ColumnarBuilderRT_c ( MemoryReader_c & tReader )	{ Load(tReader); }

	void	SetAttr ( int iAttr, int64_t tAttr ) override						{ m_dAttrs[iAttr]->AddDoc(tAttr); }
	void	SetAttr ( int iAttr, const uint8_t * pData, int iLength ) override	{ m_dAttrs[iAttr]->AddDoc ( pData, iLength ); }
	void	SetAttr ( int iAttr, const int64_t * pData, int iLength ) override	{ m_dAttrs[iAttr]->AddDoc ( pData, iLength ); }
	bool	Done ( std::string & sError ) override								{ return true; }	// nothing to finalize for RT storage

	void	Save ( MemoryWriter_c & tWriter ) override;
	CSphVector<std::unique_ptr<ColumnarAttrRT_i>> & GetAttrs() override { return m_dAttrs; }
	const CSphVector<std::unique_ptr<ColumnarAttrRT_i>>& GetAttrs() const override { return m_dAttrs; }

	void	Load ( MemoryReader_c & tReader );

private:
	CSphVector<std::unique_ptr<ColumnarAttrRT_i>> m_dAttrs;
};

// create storage for every columnar attribute, in schema order
ColumnarBuilderRT_c::ColumnarBuilderRT_c ( const CSphSchema & tSchema )
{
	for ( int i = 0; i < tSchema.GetAttrsCount(); i++ )
	{
		const auto & tAttr = tSchema.GetAttr(i);
		if ( tAttr.IsColumnar() )
		{
			m_dAttrs.Add ( CreateColumnarAttrRT ( tAttr.m_eAttrType, tAttr.m_tLocator.m_iBitCount ) );
			assert ( m_dAttrs.Last() );
		}
	}
}

void ColumnarBuilderRT_c::Save ( MemoryWriter_c & tWriter )
{
	tWriter.PutDword ( m_dAttrs.GetLength() );
	m_dAttrs.for_each ( [&tWriter]( const auto& i ){ i->Save(tWriter); } );
}

// each attribute saved its type dword first; read it here to pick the class,
// then let the attribute load the rest of its payload
void ColumnarBuilderRT_c::Load ( MemoryReader_c & tReader )
{
	m_dAttrs.Resize ( tReader.GetDword() );
	m_dAttrs.for_each ( [&tReader]( auto & pAttr ){ pAttr = CreateColumnarAttrRT ( (ESphAttr)tReader.GetDword(), 0 ); pAttr->Load(tReader); } );
}
/////////////////////////////////////////////////////////////////////
/// read-side adapter over in-memory RT columnar attributes;
/// analyzer/prefilter/minmax/early-reject features are stubbed out here
class ColumnarRT_c : public ColumnarRT_i
{
public:
	explicit ColumnarRT_c ( const CSphVector<std::unique_ptr<ColumnarAttrRT_i>> & dAttrs );

	columnar::Iterator_i * CreateIterator ( const std::string & sName, const columnar::IteratorHints_t & tHints, columnar::IteratorCapabilities_t * pCapabilities, std::string & sError ) const override;
	std::vector<common::BlockIterator_i *> CreateAnalyzerOrPrefilter ( const std::vector<common::Filter_t> & dFilters, std::vector<int> & dDeletedFilters, const columnar::BlockTester_i & tBlockTester ) const override { return {}; }
	int64_t EstimateMinMax ( const common::Filter_t & tFilter, const columnar::BlockTester_i & tBlockTester ) const final { return -1; }
	bool GetAttrInfo ( const std::string & sName, columnar::AttrInfo_t & tInfo ) const override;
	bool EarlyReject ( const std::vector<common::Filter_t> & dFilters, const columnar::BlockTester_i & tBlockTester ) const override { return false; }
	bool IsFilterDegenerate ( const common::Filter_t & tFilter ) const override { return false; }
	void Save ( Writer_i & tWriter ) override;
	int64_t AllocatedBytes() const override;

protected:
	const CSphVector<std::unique_ptr<ColumnarAttrRT_i>>& m_dAttrs;	// borrowed; the referenced vector must outlive this object
	void PopulateHashFromSchema ( const CSphSchema& tSchema );

private:
	SmallStringHash_T<std::pair<ColumnarAttrRT_i*,int>> m_hAttrs;	// attr name -> (accessor, columnar attr id)
};
// only binds the reference; ownership is decided by the Light/Full subclasses
ColumnarRT_c::ColumnarRT_c ( const CSphVector<std::unique_ptr<ColumnarAttrRT_i>>& dAttrs )
	: m_dAttrs { dAttrs }
{}
/// non-owning flavor: reads attributes straight out of a live builder;
/// the builder must outlive this object
class LightColumnarRT_c : public ColumnarRT_c
{
public:
	LightColumnarRT_c ( const CSphSchema& tSchema, const ColumnarBuilderRT_i* pBuilder )
		: ColumnarRT_c ( pBuilder->GetAttrs() )
	{
		PopulateHashFromSchema ( tSchema );
	}
};
/// owning flavor: keeps the attributes itself, either moved out of a builder
/// or deserialized from a reader
class FullColumnarRT_c: public ColumnarRT_c
{
public:
	FullColumnarRT_c ( const CSphSchema& tSchema, ColumnarBuilderRT_i* pBuilder )
		: ColumnarRT_c ( m_dOwnedAttrs )	// binds a reference only; m_dOwnedAttrs is not read before it is constructed
		, m_dOwnedAttrs { std::move (pBuilder->GetAttrs())}
	{
		PopulateHashFromSchema ( tSchema );
	}

	FullColumnarRT_c ( const CSphSchema& tSchema, CSphReader& tReader )
		: ColumnarRT_c ( m_dOwnedAttrs )
	{
		// wire format matches ColumnarBuilderRT_c::Save(): count, then per-attr (type, payload)
		m_dOwnedAttrs.Resize ( tReader.GetDword() );
		m_dOwnedAttrs.for_each ( [&tReader] ( auto& pAttr )
		{
			pAttr = CreateColumnarAttrRT ( (ESphAttr)tReader.GetDword(), 0 );
			pAttr->Load(tReader);
		} );
		PopulateHashFromSchema ( tSchema );
	}

private:
	CSphVector<std::unique_ptr<ColumnarAttrRT_i>> m_dOwnedAttrs;
};
// create a value iterator for the named attribute; nullptr for unknown names
// (hints/capabilities/sError are unused by this in-memory implementation;
// release(): the caller takes ownership of the returned iterator)
columnar::Iterator_i * ColumnarRT_c::CreateIterator ( const std::string & sName, const columnar::IteratorHints_t & tHints, columnar::IteratorCapabilities_t * pCapabilities, std::string & sError ) const
{
	auto * pFound = m_hAttrs ( sName.c_str() );
	if ( !pFound )
		return nullptr;

	return pFound->first->CreateIterator().release();
}
// look up an attribute by name; fills id and type on success
bool ColumnarRT_c::GetAttrInfo ( const std::string & sName, columnar::AttrInfo_t & tInfo ) const
{
	const auto * pEntry = m_hAttrs ( sName.c_str() );
	if ( pEntry )
	{
		tInfo.m_iId = pEntry->second;
		tInfo.m_eType = pEntry->first->GetType();
	}

	return pEntry!=nullptr;
}
// serialize in the same layout the builder writes: count, then each attribute
void ColumnarRT_c::Save ( Writer_i & tWriter )
{
	tWriter.PutDword ( m_dAttrs.GetLength() );
	for ( const auto & pAttr : m_dAttrs )
		pAttr->Save(tWriter);
}
// sum of the per-attribute memory footprints
int64_t ColumnarRT_c::AllocatedBytes() const
{
	int64_t iBytes = 0;
	m_dAttrs.for_each ( [&iBytes] ( const auto & pAttr ) { iBytes += pAttr->AllocatedBytes(); } );
	return iBytes;
}
// build the name->accessor lookup; m_dAttrs must hold the columnar attributes
// in the same order they appear in the schema (verified by the final assert)
void ColumnarRT_c::PopulateHashFromSchema ( const CSphSchema & tSchema )
{
	int iColumnar = 0;
	for ( int i = 0; i < tSchema.GetAttrsCount(); ++i )
	{
		const auto & tAttr = tSchema.GetAttr(i);
		if ( !tAttr.IsColumnar() )
			continue;

		m_hAttrs.Add ( { m_dAttrs[iColumnar].get(), iColumnar }, tAttr.m_sName );
		++iColumnar;
	}

	assert ( m_hAttrs.GetLength() == m_dAttrs.GetLength() );
}
/////////////////////////////////////////////////////////////////////
// restore a builder from serialized state (counterpart of ColumnarBuilderRT_c::Save)
std::unique_ptr<ColumnarBuilderRT_i> CreateColumnarBuilderRT ( MemoryReader_c & tReader )
{
	return std::make_unique<ColumnarBuilderRT_c> ( tReader );
}
// make a builder for the schema; no columnar attributes means no builder at all
std::unique_ptr<ColumnarBuilderRT_i> CreateColumnarBuilderRT ( const CSphSchema & tSchema )
{
	if ( tSchema.HasColumnarAttrs() )
		return std::make_unique<ColumnarBuilderRT_c> ( tSchema );

	return nullptr;
}
// columnar reader will NOT take ownership of attributes in columnar builder
std::unique_ptr<ColumnarRT_i> CreateLightColumnarRT ( const CSphSchema& tSchema, const ColumnarBuilderRT_i* pBuilder )
{
	if ( pBuilder )
		return std::make_unique<LightColumnarRT_c> ( tSchema, pBuilder );

	return nullptr;
}
// columnar reader will take ownership of attributes in columnar builder
std::unique_ptr<ColumnarRT_i> CreateColumnarRT ( const CSphSchema& tSchema, ColumnarBuilderRT_i* pBuilder )
{
	if ( pBuilder )
		return std::make_unique<FullColumnarRT_c> ( tSchema, pBuilder );

	return nullptr;
}
// load owning columnar storage from disk; returns nullptr (with sError set) on read failure
std::unique_ptr<ColumnarRT_i> CreateColumnarRT ( const CSphSchema & tSchema, CSphReader & tReader, CSphString & sError )
{
	auto pColumnar = std::make_unique<FullColumnarRT_c>( tSchema, tReader );
	// the reader accumulates its error flag; one check after the whole load suffices
	if ( tReader.GetErrorFlag() )
	{
		sError.SetSprintf ( "error loading columnar attribute storage: %s", tReader.GetErrorMessage().cstr() );
		return nullptr;
	}

	return pColumnar;
}
// rebuild the columnar builder, dropping rows whose map entry is INVALID_ROWID;
// no-op when there is no builder or when the row map keeps every row
void RemoveColumnarDuplicates ( std::unique_ptr<ColumnarBuilderRT_i> & pBuilder, const CSphFixedVector<RowID_t> & dRowMap, const CSphSchema & tSchema )
{
	if ( !pBuilder )
		return;

	// nothing to remove? keep the existing builder as is
	if ( !dRowMap.any_of ( []( RowID_t tRowID ){ return tRowID==INVALID_ROWID; } ) )
		return;

	std::unique_ptr<ColumnarBuilderRT_i> pNewBuilder = CreateColumnarBuilderRT(tSchema);

	{
		// scope: the light (non-owning) reader over the old builder must be
		// destroyed before pBuilder is replaced below
		std::unique_ptr<ColumnarRT_i> pColumnar = CreateLightColumnarRT ( tSchema, pBuilder.get() );
		CSphVector<ScopedTypedIterator_t> dIterators = CreateAllColumnarIterators ( pColumnar.get(), tSchema );
		CSphVector<int64_t> dTmpMVA;

		for ( RowID_t tSrcRowID = 0; tSrcRowID < dRowMap.GetLength(); tSrcRowID++ )
		{
			if ( dRowMap[tSrcRowID]==INVALID_ROWID )
				continue;

			// copy every columnar value of the surviving row into the new builder
			ARRAY_FOREACH ( iAttr, dIterators )
				SetColumnarAttr ( iAttr, dIterators[iAttr].second, pNewBuilder.get(), dIterators[iAttr].first, tSrcRowID, dTmpMVA );
		}
	}

	pBuilder = std::move(pNewBuilder);
}
| 21,018
|
C++
|
.cpp
| 502
| 39.818725
| 228
| 0.700285
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,934
|
wordbreaker.cpp
|
manticoresoftware_manticoresearch/src/wordbreaker.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#define PREFER_ALLDICT 0
#define UNKNOWN_WORD_COEFF 3.0f
#define DICT_THRESH 12
#define DEFAULT_DICT "wordbreaker-dict.txt"
#define DICT_COMPOUND_MIN 6 // keywords longer than this are potential compounds
#define DICT_COMPOUND_THRESH 0.0001f // penalize a no-split with a rare potential compound (that occurs in less than this percentage)
#define DICT_COMPOUND_COEFF 1.53f // penalization factor
#include "sphinxstd.h"
#include <math.h>
/// my own isalpha (let's build our own theme park!)
/// ASCII lowercase [a-z] only; deliberately locale-independent
inline int IsAlpha ( int c )
{
	return ( c>='a' ) && ( c<='z' );
}
// copied over from sphinxutils; remove at some point
// tokenize sIn into maximal runs of [a-z] characters, appending each run to dOut
static void StrSplit ( StrVec_t & dOut, const char * sIn )
{
	if ( !sIn )
		return;

	const char * p = sIn;
	while ( *p )
	{
		// skip non-alphas
		while ( (*p) && !IsAlpha(*p) )
			p++;
		if ( !(*p) )
			break;

		// this is my next token
		assert ( IsAlpha(*p) );
		const char * sNext = p;
		while ( IsAlpha(*p) )
			p++;
		if ( sNext!=p )
			dOut.Add().SetBinary ( sNext, int ( p-sNext ) );
	}
}
/// probability descriptor
struct Prob_t
{
	float m_fProb; ///< log(P(w)); 0 only in a default-constructed descriptor (note that P(w)<0 at all times)
	bool m_bDict; ///< true for dict keywords, false otherwise (see LangModel_c::GetProb)

	/// construct a non-dict keyword as default
	Prob_t()
		: m_fProb ( 0.0f )
		, m_bDict ( false )
	{}
};
/// stupid easy unigram model
/// maps keyword -> log(freq); call SetTotal() once after all AddWord() calls
class LangModel_c
{
private:
	typedef CSphOrderedHash < float, CSphString, CSphStrHashFunc, 1048576 > HashType_t;
	HashType_t * m_pHash;		// owned; allocated in ctor, freed via SafeDelete in dtor
	int m_iTotal = 0;
	float m_fLogTotal = 0.0f;	// log(total hits), cached by SetTotal()

public:
	float m_fLogMin;			// log(DICT_THRESH); entries below this are treated as non-dict

	LangModel_c()
	{
		m_pHash = new HashType_t();
		m_fLogMin = logf ( (float)DICT_THRESH );
	}

	~LangModel_c()
	{
		SafeDelete ( m_pHash );
	}

	// must be called once after loading; enables GetProb() scoring
	void SetTotal ( int iTotal )
	{
		m_iTotal = iTotal;
		m_fLogTotal = logf ( (float)iTotal );
	}

	void AddWord ( const char * sWord, int iFreq )
	{
		// we use 1 as a special case
		assert ( iFreq>1 );
		CSphString sVal ( sWord );
		m_pHash->Add ( logf((float)iFreq), sVal );
	}

	// score the (not necessarily NUL-terminated) substring [sKey, sKey+iLen)
	Prob_t GetProb ( const char * sKey, int iLen ) const
	{
		Prob_t r;
		CSphString sKey2;
		sKey2.SetBinary ( sKey, iLen );

		// found? P(w) = log(freq/total) = log(freq) - log(total)
		float * pVal = (*m_pHash)(sKey2);
		if ( pVal && *pVal >= m_fLogMin )
		{
			r.m_fProb = *pVal - m_fLogTotal;
			r.m_bDict = true;
		} else
		{
#if 1
		// not found? lets do some tricks
		// we wanna penalize non-dict "words" compared to dict words
		// we wanna penalize super-long "words" compared to shorter options
		// we DO NOT wanna overpenalize (dict + junkN) combo vs (junkM) though
		if ( iLen>20 )
			iLen = 20;
		// empiric per-length penalty factors; length 7 is the cheapest
		static float k[21] =
		{
			9.9f, 7.5f, 4.2f, 2.1f, 1.5f, // 0, 1, 2, 3, 4,
			1.2f, 1.1f, 1.0f, 1.1f, 1.2f, // 5, 6, 7, 8, 9,
			1.5f, 1.8f, 2.3f, 2.7f, 3.2f, // 10, 11, 12, 13, 14
			3.6f, 4.0f, 4.4f, 4.8f, 5.3f, // 15, 16, 17, 18, 19
			9.9f // 20
		};
		r.m_fProb = -m_fLogTotal * ( 0.5f + k[iLen] );
#else
		r.m_fProb = -m_fLogTotal * UNKNOWN_WORD_COEFF;
#endif
		r.m_bDict = false;
		}
		return r;
	}

	float GetLogTotal() const
	{
		return -m_fLogTotal;
	}
};
/// compound meta-probability
/// store sum_{w \in words} ( log(P(w)) ) as m_fProb
/// store and_{w \in words} ( m_bDict ) as m_bDict
struct Split_t
{
	CSphVector<int> m_Pos; ///< split positions (end offset of each word)
	float m_fProb;         ///< sum of per-word log-probabilities
	bool m_bAllDict;       ///< every word so far is a dict word
	bool m_bAnyDict;       ///< at least one word so far is a dict word

	Split_t()
		: m_fProb ( 0.0f )
		, m_bAllDict ( false )
		, m_bAnyDict ( false )
	{}

	// debug print: the word with '|' at split positions, plus score and dict flags
	void Dump ( const char * sWord, const char * sHead=NULL )
	{
		if ( sHead )
			printf ( "%s: ", sHead );
		int iCur = 0;
		ARRAY_FOREACH ( j, m_Pos )
		{
			while ( iCur<m_Pos[j] )
				printf ( "%c", sWord[iCur++] );
			printf ( "|" );
		}
		while ( sWord[iCur] )
			printf ( "%c", sWord[iCur++] );
		printf ( ", %f", m_fProb );
		if ( m_bAllDict )
			printf ( " (all-dict)" );
		else if ( m_bAnyDict )
			printf ( " (some-dict)" );
		printf ( "\n" );
	}

	// extend this split with a word ending at iPos, scored as p
	void AddSplitPos ( const Prob_t & p, int iPos )
	{
		// if current split is empty, new one just takes over
		if ( !m_Pos.GetLength() )
		{
			m_Pos.Add ( iPos );
			m_fProb = p.m_fProb;
			m_bAllDict = p.m_bDict;
			m_bAnyDict = p.m_bDict;
			return;
		}

		// if we have data, we combine it
		assert ( m_Pos.Last() < iPos );
		m_Pos.Add ( iPos );
		m_fProb += p.m_fProb;
		m_bAllDict &= p.m_bDict;
		m_bAnyDict |= p.m_bDict;
	}

	// "worse than": any-dict splits beat no-dict ones, then higher probability wins
	bool operator < ( const Split_t & rhs ) const
	{
#if PREFER_ALLDICT
		if ( m_bAllDict!=rhs.m_bAllDict )
			return m_bAllDict < rhs.m_bAllDict;
#endif
		// do not (!) check anydict flag on an initial empty split
		if ( m_Pos.GetLength() )
			if ( m_bAnyDict!=rhs.m_bAnyDict )
				return m_bAnyDict < rhs.m_bAnyDict;
		return m_fProb < rhs.m_fProb;
	}
};
// global unigram model; filled by UrlBreakInit(), consumed by UrlBreak()
static LangModel_c g_LM;
// load the frequency dictionary into g_LM; each line is "<keyword> <freq>";
// non-[a-z0-9] keywords are skipped but their freq still counts towards the total
static void UrlBreakInit ( const char * sDict, bool bVerbose )
{
	FILE * fp = fopen ( sDict, "rb" );
	if ( !fp )
		sphDie ( "failed to open %s", sDict );

	int iEntries = 0, iSumFreqs = 0;
	char sBuf[512];
	sBuf[0] = 0;
	while ( fgets ( sBuf, sizeof(sBuf), fp ) )
	{
		// extract keyword
		char * p = sBuf;
		while ( *p && *p!=' ' )
			p++;
		if ( *p!=' ' )
			sphDie ( "bad freqdict line: no space" );
		*p++ = 0;

		// extract freq
		int iFreq = atoi(p);
		if ( !iFreq )
			sphDie ( "bad freqdict line: zero freq" );
		iSumFreqs += iFreq;

#if 0
		// only keep frequent-enough words
		if ( iFreq < MIN_FREQ )
			continue;
#endif

		// only keep all-latin words
		// (NOTE(review): digits also pass this check, despite the "latin" name)
		bool bLatin = true;
		char * s = sBuf;
		while ( *s )
		{
			if ( !( *s>='a' && *s<='z' ) && !( *s>='0' && *s<='9' ) )
			{
				bLatin = false;
				break;
			}
			s++;
		}
		if ( !bLatin )
			continue;

		// hash it
		// (NOTE(review): AddWord() asserts freq>1; a freq==1 dict entry would trip it in debug builds)
		g_LM.AddWord ( sBuf, iFreq );
		iEntries++;
	}
	fclose ( fp );

	g_LM.SetTotal ( iSumFreqs );
	if ( bVerbose )
		printf ( "kept %d entries, total %d hits\n", iEntries, iSumFreqs );
}
// split sWord into keywords by maximizing the total unigram log-probability;
// iteratively extends partial splits, pruning any partial that is already
// worse than the best complete split found so far
static void UrlBreak ( Split_t & tBest, const char * sWord )
{
	auto iLen = (const int) strlen(sWord);

	tBest.m_Pos.Resize(0);

	// current partial splits
	// begin with an empty one
	CSphVector<Split_t> dSplits;
	dSplits.Add();

	// our best guess so far
	// begin with a trivial baseline one (ie. no splits at all)
	Prob_t p = g_LM.GetProb ( sWord, iLen );
	tBest.m_Pos.Add ( iLen );
	tBest.m_fProb = p.m_fProb;
	tBest.m_bAllDict = tBest.m_bAnyDict = p.m_bDict;

	// a long dict keyword may itself be a rare compound;
	// penalize the no-split baseline so an all-dict split has a chance to win
	if ( iLen>=DICT_COMPOUND_MIN && tBest.m_bAllDict )
	{
		static const float THRESH = logf ( DICT_COMPOUND_THRESH );
		if ( tBest.m_fProb<=THRESH )
			tBest.m_fProb *= DICT_COMPOUND_COEFF;
	}

	// work the current splits
	CSphVector<Split_t> dSplits2;
	while ( dSplits.GetLength() )
	{
		int iWorkedSplits = 0;
		float fPrevBest = tBest.m_fProb;

		ARRAY_FOREACH ( iSplit, dSplits )
		{
			Split_t & s = dSplits[iSplit];

			// filter out splits that were added before (!) a new best guess on the previous iteration
			if ( dSplits[iSplit] < tBest )
				continue;
			iWorkedSplits++;

			int iLast = 0;
			if ( s.m_Pos.GetLength() )
				iLast = s.m_Pos.Last();

			for ( int i=1+iLast; i<iLen; i++ )
			{
				// consider a split at position i
				// it generates a word candidate [iLast,i) and a tail [i,iLen)
				// let's score those
				Prob_t tCand = g_LM.GetProb ( sWord+iLast, i-iLast );
				Prob_t tTail = g_LM.GetProb ( sWord+i, iLen-i );

				// if the current best is all-keywords, the new candidates must be, too
				if ( tBest.m_bAllDict && !tCand.m_bDict )
					continue;

				// compute partial and full split candidates generated by the current guess
				Split_t tPartial = s;
				tPartial.AddSplitPos ( tCand, i );

				Split_t tFull = tPartial;
				tFull.AddSplitPos ( tTail, iLen );

				// check if the full one is our new best full one
				if ( tBest < tFull )
				{
					// FIXME? we do this even when the new split is *not* all-keywords,
					// but the old best split was; is this ever a problem?
					tBest = tFull;
					// tBest.Dump ( sWord, "new-best" );
				}

				// check if the resulting partial split is worth scanning further
				if ( tBest < tPartial )
				{
					dSplits2.Add ( tPartial );
					// dSplits2.Last().Dump ( sWord, "scan-partial" );
				}
			}
		}

		// damage control!
		// if we just processed over 100K candidate splits and got no improvement
		// lets assume that our chances of getting one are kinda low and bail
		if ( iWorkedSplits>=100000 && tBest.m_fProb>=fPrevBest )
			break;

		// keep going
		dSplits.SwapData ( dSplits2 );
		dSplits2.Resize ( 0 );
	}
}
/// trim leading and trailing whitespace in place;
/// returns a pointer into sBuf past the leading whitespace, with the trailing
/// whitespace overwritten by NUL terminators
static char * Strip ( char * sBuf )
{
	char * p = sBuf;
	// cast to unsigned char: passing a negative char to isspace() is UB
	while ( *p && isspace ( (unsigned char)*p ) )
		p++;

	// trim from the right; never form a pointer before the string start
	// (the old `p + strlen(p) - 1` underflowed on an all-whitespace/empty tail)
	char * e = p + strlen(p);
	while ( e>p && isspace ( (unsigned char)e[-1] ) )
		*--e = '\0';

	return p;
}
// precision test: each TESTFILE line is "base sub1 sub2 ..."; a test passes
// when every expected substring appears among the words UrlBreak() produces
static void UrlBreakTest ( const char * sTestFile )
{
	// load the test data
	CSphVector <StrVec_t> dTests;
	FILE * fp = fopen ( sTestFile, "rb" );
	if ( !fp )
		sphDie ( "failed to open %s", sTestFile );
	char sBuf[1024];
	while ( fgets ( sBuf, sizeof(sBuf), fp ) )
	{
		// strip spaces
		char * p = Strip ( sBuf );
		// ignore empty lines
		if ( !*p )
			continue;
		// ignore comments
		if ( p[0]=='/' && p[1]=='/' )
			continue;
		// parse!
		StrVec_t & dTest = dTests.Add();
		StrSplit ( dTest, p );
		// a usable test needs a base plus at least one expected substring
		if ( dTest.GetLength()<2 )
		{
			if ( dTest.GetLength()==1 )
				printf ( "WARNING: no substrings defined for base %s\n", dTest[0].cstr() );
			dTests.Pop();
		}
	}
	fclose ( fp );

	// self check
	int iNosplit = 0;
	ARRAY_FOREACH ( i, dTests )
	{
		// "nosplit" tests expect the base to come back unchanged
		if ( dTests[i][1]==dTests[i][0] )
		{
			// printf ( "WARNING: base %s is the only substring\n", dTests[i][0].cstr() );
			iNosplit++;
		}
		for ( int j=1; j<dTests[i].GetLength(); j++ )
			if ( !strstr ( dTests[i][0].cstr(), dTests[i][j].cstr() ) )
				printf ( "WARNING: substring %s not found in base %s\n", dTests[i][j].cstr(), dTests[i][0].cstr() );
	}
	if ( iNosplit )
		printf ( "total %d nosplits, %.3f of the test suite\n",
			iNosplit, float(iNosplit)/dTests.GetLength() );

	int iTotal = 0;
	int iGood = 0;
	int64_t tmWall = sphMicroTimer();
	ARRAY_FOREACH ( iTest, dTests )
	{
#ifndef NDEBUG
		int64_t tmWord = sphMicroTimer();
#endif
		const char * sWord = dTests[iTest][0].cstr();

		// break into keywords
		Split_t tBest;
		UrlBreak ( tBest, sWord );

		// generate actual strings
		int iCur = 0;
		StrVec_t dWords;
		ARRAY_FOREACH ( i, tBest.m_Pos )
		{
			dWords.Add().SetBinary ( sWord+iCur, tBest.m_Pos[i]-iCur );
			iCur = tBest.m_Pos[i];
		}
		if ( sWord[iCur] )
			dWords.Add ( sWord+iCur );

		// check them and compute precision
		bool bGood = true;
		for ( int j=1; j<dTests[iTest].GetLength() && bGood; j++ )
			if ( !dWords.Contains ( dTests[iTest][j] ) )
				bGood = false;
		iTotal++;
		if ( bGood )
			iGood++;

		// debug dump
#ifndef NDEBUG
		if ( !bGood )
		{
			printf ( "%d msec, %s => ", (int)( ( sphMicroTimer() - tmWord )/1000 ), sWord );
			tBest.Dump ( sWord );
		}
#endif
	}

	// results
	if ( iTotal )
		printf ( "prec %.3f, wall %d msec, %d good, %d total\n",
			float(iGood)/iTotal, (int)( ( sphMicroTimer() - tmWall )/1000 ), iGood, iTotal );
	else
		printf ( "prec INF, wall %d msec, %d good, 0 total\n", (int) (
			( sphMicroTimer () - tmWall ) / 1000 ), iGood );
	if ( iTotal-iNosplit )
		printf ( "prec %.3f, %d total w/o nosplits\n", float(iGood)/(iTotal-iNosplit), iTotal-iNosplit );
	else
		printf ( "prec INF, 0 total w/o nosplits\n" );
}
// same character class as IsAlpha(): ASCII lowercase letters only
static bool UrlBreakIsChar ( int c )
{
	return !( c<'a' || c>'z' );
}
// performance benchmark: split every lowercase run of MIN_BREAK+ chars
// found in sBenchFile and report total wall time
static void UrlBreakBench ( const char * sBenchFile )
{
	int64_t tmWall = sphMicroTimer();

	const int MIN_BREAK = 5;
	FILE * fp = fopen ( sBenchFile, "rb" );
	if ( !fp )
		sphDie ( "failed to open %s", sBenchFile );

	Split_t tBest;
	char sBuf[512];
	while ( fgets ( sBuf, sizeof(sBuf), fp ) )
	{
		char * p = sBuf;
		while ( *p )
		{
			// find the next lowercase run
			while ( *p && !UrlBreakIsChar(*p) )
				p++;
			if ( !*p )
				break;

			char * sUrl = p;
			while ( UrlBreakIsChar(*p) )
				p++;

			// skip runs too short to be worth splitting
			if ( p-sUrl < MIN_BREAK )
				continue;

			if ( *p )
				*p++ = '\0';

#if 0
			int64_t tmWord = sphMicroTimer();
#endif
			UrlBreak ( tBest, sUrl );
#if 0
			if ( !tBest.m_bAllDict )
			{
				printf ( "%d usec, %s => ", (int)( (sphMicroTimer()-tmWord) ), sUrl );
				tBest.Dump ( sUrl );
			}
#endif
		}
	}
	fclose ( fp );

	tmWall = sphMicroTimer() - tmWall;
	printf ( "%d msec\n", (int)(tmWall/1000) );
}
// filter mode: read lines from stdin, split every [a-z] run into keywords,
// and print the words space-separated, one output line per input line
// (fix: removed the unused local `StrVec_t dWords`, a leftover from UrlBreakTest)
static void UrlBreakSplit()
{
	char sBuf[1024];
	char sSpace[] = " ";
	while ( !feof ( stdin ) )
	{
		// read next one
		if ( !fgets ( sBuf, sizeof(sBuf), stdin ) )
			continue;

		char * pMax = sBuf + strlen(sBuf);
		char * p = sBuf;
		while ( p<pMax )
		{
			// find the next alpha run
			while ( p<pMax && !IsAlpha(*p) )
				p++;
			if ( p>=pMax )
				break;

			char * sWord = p;
			while ( p<pMax && IsAlpha(*p) )
				p++;
			*p = '\0';

			// break this token into keywords
			Split_t tBest;
			UrlBreak ( tBest, sWord );

			// print the pieces; m_Pos holds the end offset of each word
			int iCur = 0;
			ARRAY_FOREACH ( i, tBest.m_Pos )
			{
				fwrite ( sWord+iCur, 1, tBest.m_Pos[i]-iCur, stdout );
				fwrite ( sSpace, 1, 1, stdout );
				iCur = tBest.m_Pos[i];
			}
			if ( sWord[iCur] )
				printf ( "%s ", sWord+iCur );
		}

		// all done
		printf ( "\n" );
		fflush ( stdout );
	}
}
// entry point: exactly one command (test/bench/split) plus an optional --dict
int main ( int argc, char ** argv )
{
	const char * sDict = DEFAULT_DICT;

	// no arguments? print usage and bail
	if ( argc<2 )
	{
		printf (
			"wordbreaker, a tool to split compounds (eg URL parts) into individual words\n"
			"\n"
			"Usage: wordbreaker <COMMAND> [OPTIONS]\n"
			"\n"
			"Commands are:\n"
			"test <TESTFILE>\t\tdo a splitting precision test on TESTFILE\n"
			"bench <TESTFILE>\tdo a splitting performance benchmark on TESTFILE\n"
			"split\t\t\tdo splitting (read from stdin, split, print to stdout)\n"
			"\n"
			"Options are:\n"
			"--dict <FILENAME>\tuse FILENAME as a frequency dictionary\n"
			"\t\t\t(default is " DEFAULT_DICT ")\n"
			);
		return 0;
	}

	enum
	{
		CMD_NONE,
		CMD_TEST,
		CMD_BENCH,
		CMD_SPLIT
	} eCommand = CMD_NONE;

	const char * sFile = "";
	for ( int i=1; i<argc; i++ )
	{
		if ( !strcmp ( argv[i], "test") )
		{
			if ( eCommand!=CMD_NONE )
				sphDie ( "you must specify exactly one command" );
			if ( ++i>=argc )
				sphDie ( "test requires an argument" );
			eCommand = CMD_TEST;
			sFile = argv[i];
		} else if ( !strcmp ( argv[i], "bench") )
		{
			if ( eCommand!=CMD_NONE )
				sphDie ( "you must specify exactly one command" );
			if ( ++i>=argc )
				sphDie ( "bench requires an argument" );
			eCommand = CMD_BENCH;
			sFile = argv[i];
		} else if ( !strcmp ( argv[i], "split") )
		{
			if ( eCommand!=CMD_NONE )
				sphDie ( "you must specify exactly one command" );
			eCommand = CMD_SPLIT;
		} else if ( !strcmp ( argv[i], "--dict") )
		{
			if ( ++i>=argc )
				sphDie ( "--dict requires an argument" );
			sDict = argv[i];
		} else
		{
			sphDie ( "unknown switch: %s", argv[i] );
		}
	}

	// dictionary loading is verbose for test/bench, quiet for the split filter
	switch ( eCommand )
	{
		case CMD_NONE:
			sphDie ( "no command given" );
		case CMD_TEST:
			UrlBreakInit ( sDict, true );
			UrlBreakTest ( sFile );
			break;
		case CMD_BENCH:
			UrlBreakInit ( sDict, true );
			UrlBreakBench ( sFile );
			break;
		case CMD_SPLIT:
			UrlBreakInit ( sDict, false );
			UrlBreakSplit ();
			break;
	}
	return 0;
}
| 15,487
|
C++
|
.cpp
| 597
| 22.867672
| 134
| 0.617862
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
16,935
|
searchdddl.cpp
|
manticoresoftware_manticoresearch/src/searchdddl.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "searchdddl.h"
/// parser for DDL statements (CREATE TABLE, ALTER TABLE, JOIN CLUSTER, ...)
class DdlParser_c : public SqlParserTraits_c
{
public:
	// this exists because we have separate field/attribute entities in the schema, but not in DDL
	enum
	{
		FLAG_NONE = 0,
		FLAG_STORED = 1<<0,
		FLAG_INDEXED = 1<<1,
		FLAG_ATTRIBUTE = 1<<2
	};

	// options accumulated while parsing a single column definition
	struct ItemOptions_t
	{
		AttrEngine_e m_eEngine = AttrEngine_e::DEFAULT;
		bool m_bStringHash = true;
		bool m_bFastFetch = true;
		bool m_bIndexed = false;
		bool m_bHashOptionSet = false; // 'hash' was given explicitly (only valid for strings)

		CSphString m_sKNNType;
		int m_iKNNDims = 0;
		int m_iHNSWM = 16;
		int m_iHNSWEFConstruction = 200;
		knn::HNSWSimilarity_e m_eHNSWSimilarity = knn::HNSWSimilarity_e::L2;
		bool m_bKNNDimsSpecified = false;
		bool m_bHNSWSimilaritySpecified = false;

		void Reset() { *this = ItemOptions_t(); }
		DWORD ToFlags() const;
		knn::IndexSettings_t ToKNN() const;
		void CopyOptionsTo ( CreateTableAttr_t & tAttr ) const;
	};

	DdlParser_c ( CSphVector<SqlStmt_t>& dStmt, const char* szQuery, CSphString* pError );

	const char * GetLastError() const { return m_sError.scstr(); }

	bool AddCreateTableCol ( const SqlNode_t & tName, const SqlNode_t & tCol );
	bool AddCreateTableId ( const SqlNode_t & tName );
	void AddCreateTableBitCol ( const SqlNode_t & tCol, int iBits );

	// per-column option handlers, invoked from the grammar actions
	bool AddItemOptionEngine ( const SqlNode_t & tOption );
	bool AddItemOptionHash ( const SqlNode_t & tOption );
	bool AddItemOptionFastFetch ( const SqlNode_t & tOption );
	bool AddItemOptionIndexed ( const SqlNode_t & tOption );
	bool AddItemOptionKNNType ( const SqlNode_t & tOption );
	bool AddItemOptionKNNDims ( const SqlNode_t & tOption );
	bool AddItemOptionHNSWSimilarity ( const SqlNode_t & tOption );
	bool AddItemOptionHNSWM ( const SqlNode_t & tOption );
	bool AddItemOptionHNSWEfConstruction ( const SqlNode_t & tOption );

	void AddCreateTableOption ( const SqlNode_t & tName, const SqlNode_t & tValue );
	bool SetupAlterTable ( const SqlNode_t & tIndex, const SqlNode_t & tAttr, const SqlNode_t & tType, bool bModify = false );
	bool SetupAlterTable ( const SqlNode_t & tIndex, const SqlNode_t & tAttr, ESphAttr eAttr, int iFieldFlags, int iBits=-1, bool bModify = false );
	void JoinClusterAt ( const SqlNode_t & tAt );
	void AddInsval ( CSphVector<SqlInsert_t> & dVec, const SqlNode_t & tNode );

private:
	CSphString m_sError;
	ItemOptions_t m_tItemOptions; // options of the column currently being parsed

	void AddField ( const CSphString & sName, DWORD uFlags );
	bool ConvertToAttrEngine ( const SqlNode_t & tEngine, AttrEngine_e & eEngine );
	static bool CheckFieldFlags ( ESphAttr eAttrType, int iFlags, const CSphString & sName, const ItemOptions_t & tOpts, CSphString & sError );
};
#define YYSTYPE SqlNode_t
// unused parameter, simply to avoid type clash between all my yylex() functions
#define YY_DECL static int my_lex ( YYSTYPE * lvalp, void * yyscanner, DdlParser_c * pParser )
#if _WIN32
#define YY_NO_UNISTD_H 1
#endif
#include "flexddl.c"
// bison error callback for the DDL grammar
static void yyerror ( DdlParser_c * pParser, const char * sMessage )
{
	// flex put a zero at last token boundary; make it undo that
	yy3lex_unhold ( pParser->m_pScanner );

	// 'wrong parser' is quite empiric - we fire it when from very beginning parser sees syntax error
	pParser->ProcessParsingError ( sMessage );
}
// bison lexer hook, forwarding to the flex-generated my_lex()
#ifndef NDEBUG
// using a proxy to be possible to debug inside yylex
static int yylex ( YYSTYPE * lvalp, DdlParser_c * pParser )
{
	int res = my_lex ( lvalp, pParser->m_pScanner, pParser );
	return res;
}
#else
static int yylex ( YYSTYPE * lvalp, DdlParser_c * pParser )
{
	return my_lex ( lvalp, pParser->m_pScanner, pParser );
}
#endif
#include "bisddl.c"
//////////////////////////////////////////////////////////////////////////
// pack the boolean per-column options into attribute flag bits
DWORD DdlParser_c::ItemOptions_t::ToFlags() const
{
	DWORD uFlags = 0;
	if ( m_bStringHash )
		uFlags |= CSphColumnInfo::ATTR_COLUMNAR_HASHES;
	if ( m_bFastFetch )
		uFlags |= CSphColumnInfo::ATTR_STORED;
	if ( m_bIndexed )
		uFlags |= CSphColumnInfo::ATTR_INDEXED_SI;
	if ( !m_sKNNType.IsEmpty() )
		uFlags |= CSphColumnInfo::ATTR_INDEXED_KNN;

	return uFlags;
}
// pack the parsed KNN-related options into index settings
knn::IndexSettings_t DdlParser_c::ItemOptions_t::ToKNN() const
{
	knn::IndexSettings_t tKNN;

	tKNN.m_iDims = m_iKNNDims;
	tKNN.m_eHNSWSimilarity = m_eHNSWSimilarity;
	tKNN.m_iHNSWM = m_iHNSWM;
	tKNN.m_iHNSWEFConstruction = m_iHNSWEFConstruction;

	return tKNN;
}
// copy the generic (non-KNN) per-column options into a CREATE TABLE attribute
void DdlParser_c::ItemOptions_t::CopyOptionsTo ( CreateTableAttr_t & tAttr ) const
{
	tAttr.m_tAttr.m_eEngine = m_eEngine;
	tAttr.m_bFastFetch = m_bFastFetch;
	tAttr.m_bStringHash = m_bStringHash;
	tAttr.m_bIndexed = m_bIndexed;
}
//////////////////////////////////////////////////////////////////////////
// DDL parsing always targets exactly one statement (enforced by the assert)
DdlParser_c::DdlParser_c ( CSphVector<SqlStmt_t> & dStmt, const char* szQuery, CSphString* pError )
	: SqlParserTraits_c ( dStmt, szQuery, pError )
{
	if ( m_dStmt.IsEmpty() )
		PushQuery ();
	else
		m_pStmt = &m_dStmt.Last();
	assert ( m_dStmt.GetLength()==1 );
	m_sErrorHeader = "P03:";
}
// add an integer column with an explicit bit width (e.g. 'flag bit(3)')
void DdlParser_c::AddCreateTableBitCol ( const SqlNode_t & tCol, int iBits )
{
	assert(m_pStmt);

	CreateTableAttr_t & tAttr = m_pStmt->m_tCreateTable.m_dAttrs.Add();
	ToString ( tAttr.m_tAttr.m_sName, tCol );
	tAttr.m_tAttr.m_sName.ToLower();
	tAttr.m_tAttr.m_eAttrType = SPH_ATTR_INTEGER;
	tAttr.m_tAttr.m_tLocator.m_iBitCount = iBits;
	m_tItemOptions.CopyOptionsTo(tAttr);
	// options apply to one column only; clear them for the next one
	m_tItemOptions.Reset();
}
// append a full-text field descriptor to the CREATE TABLE statement
void DdlParser_c::AddField ( const CSphString & sName, DWORD uFlags )
{
	assert(m_pStmt);

	auto & tNewField = m_pStmt->m_tCreateTable.m_dFields.Add();
	tNewField.m_sName = sName;
	tNewField.m_uFieldFlags = uFlags;
}
// map DDL parser column flags to schema field flags
static DWORD ConvertFlags ( int iFlags )
{
	DWORD uFieldFlags = 0;
	if ( iFlags & DdlParser_c::FLAG_INDEXED )
		uFieldFlags |= CSphColumnInfo::FIELD_INDEXED;
	if ( iFlags & DdlParser_c::FLAG_STORED )
		uFieldFlags |= CSphColumnInfo::FIELD_STORED;
	if ( iFlags & DdlParser_c::FLAG_ATTRIBUTE )
		uFieldFlags |= CSphColumnInfo::FIELD_IS_ATTRIBUTE;

	return uFieldFlags;
}
// validate flag/option combinations for a given attribute type;
// returns false with sError filled on any invalid combination
bool DdlParser_c::CheckFieldFlags ( ESphAttr eAttrType, int iFlags, const CSphString & sName, const ItemOptions_t & tOpts, CSphString & sError )
{
	// KNN indexing is a float_vector-only feature
	if ( eAttrType!=SPH_ATTR_FLOAT_VECTOR && !tOpts.m_sKNNType.IsEmpty() )
	{
		sError = "knn_type='hnsw' can only be used with float_vector attributes";
		return false;
	}

	if ( eAttrType==SPH_ATTR_STRING )
	{
		// 'stored' applies to fields; a string attribute cannot be 'stored'
		if ( ( iFlags & FLAG_ATTRIBUTE ) && ( iFlags & FLAG_STORED ) )
		{
			sError.SetSprintf ( "unable to create a stored attribute '%s'", sName.cstr() );
			return false;
		}
	}
	else if ( eAttrType==SPH_ATTR_FLOAT_VECTOR )
	{
		// requesting KNN requires both dims and similarity to be given
		if ( !tOpts.m_sKNNType.IsEmpty() && ( !tOpts.m_bKNNDimsSpecified || !tOpts.m_bHNSWSimilaritySpecified ) )
		{
			sError = "knn_dims and hnsw_similarity are required if knn_type='hnsw'";
			return false;
		}
	}
	else
	{
		// field-ish flags and 'hash' are only meaningful for strings
		if ( iFlags )
		{
			sError.SetSprintf ( "options 'attribute', 'stored', 'indexed' are not applicable to non-string column '%s'", sName.cstr() );
			return false;
		}

		if ( tOpts.m_bHashOptionSet )
		{
			sError.SetSprintf ( "'hash' is applicable to columnar strings only (attempted to set for '%s')", sName.cstr() );
			return false;
		}
	}

	return true;
}
// fill the current statement for ALTER TABLE ... ADD/MODIFY COLUMN
bool DdlParser_c::SetupAlterTable ( const SqlNode_t & tIndex, const SqlNode_t & tAttr, ESphAttr eAttr, int iFieldFlags, int iBits, bool bModify )
{
	assert( m_pStmt );

	m_pStmt->m_eStmt = bModify ? STMT_ALTER_MODIFY : STMT_ALTER_ADD;
	ToString ( m_pStmt->m_sIndex, tIndex );
	ToString ( m_pStmt->m_sAlterAttr, tAttr );
	m_pStmt->m_sIndex.ToLower();
	m_pStmt->m_sAlterAttr.ToLower();
	m_pStmt->m_eAlterColType = eAttr;
	m_pStmt->m_uFieldFlags = ConvertFlags(iFieldFlags);
	m_pStmt->m_uAttrFlags = m_tItemOptions.ToFlags();
	m_pStmt->m_eEngine = m_tItemOptions.m_eEngine;
	m_pStmt->m_iBits = iBits;
	m_pStmt->m_tAlterKNN = m_tItemOptions.ToKNN();

	// validate, then always reset the accumulated options (even on failure)
	bool bOk = CheckFieldFlags ( m_pStmt->m_eAlterColType, iFieldFlags, m_pStmt->m_sAlterAttr, m_tItemOptions, m_sError );
	m_tItemOptions.Reset();

	return bOk;
}
// variant taking the attribute type and flags from a parsed type node
bool DdlParser_c::SetupAlterTable ( const SqlNode_t & tIndex, const SqlNode_t & tAttr, const SqlNode_t & tType, bool bModify )
{
	return SetupAlterTable ( tIndex, tAttr, (ESphAttr)tType.GetValueInt(), tType.m_iType, -1, bModify );
}
// add a CREATE TABLE column; a 'string' column may become an attribute,
// a full-text field, or both, depending on the parsed flags in tCol.m_iType
bool DdlParser_c::AddCreateTableCol ( const SqlNode_t & tName, const SqlNode_t & tCol )
{
	assert( m_pStmt );

	CSphString sName;
	ToString ( sName, tName );
	sName.ToLower ();

	auto eAttrType = (ESphAttr) tCol.GetValueInt();
	auto iType = tCol.m_iType;

	// options apply to this column only; reset before any early return
	ItemOptions_t tOpts = m_tItemOptions;
	m_tItemOptions.Reset();

	if ( !CheckFieldFlags ( eAttrType, iType, sName, tOpts, m_sError ) )
		return false;

	if ( eAttrType!=SPH_ATTR_STRING )
	{
		// plain (non-string) attribute
		CreateTableAttr_t & tAttr = m_pStmt->m_tCreateTable.m_dAttrs.Add();
		tAttr.m_tAttr.m_sName = sName;
		tAttr.m_tAttr.m_eAttrType = eAttrType;
		tOpts.CopyOptionsTo(tAttr);

		tAttr.m_bKNN = !tOpts.m_sKNNType.IsEmpty();
		tAttr.m_tKNN = tOpts.ToKNN();

		return true;
	}

	// actually, this may or may not be a field
	// it all depends on the combination of flags provided
	assert ( eAttrType==SPH_ATTR_STRING );
	if ( iType & FLAG_ATTRIBUTE )
	{
		// add attribute
		CreateTableAttr_t & tAttr = m_pStmt->m_tCreateTable.m_dAttrs.Add();
		tAttr.m_tAttr.m_sName = sName;
		tAttr.m_tAttr.m_eAttrType = SPH_ATTR_STRING;
		tOpts.CopyOptionsTo(tAttr);

		// 'attribute indexed' strings also get a matching indexed field
		if ( iType & FLAG_INDEXED )
			AddField ( sName, CSphColumnInfo::FIELD_INDEXED );
	}
	else
	{
		// convert flags;
		// NOTE(review): duplicates ConvertFlags() minus the attribute bit (always clear here)
		DWORD uFieldFlags = 0;
		uFieldFlags |= ( iType & FLAG_INDEXED ) ? CSphColumnInfo::FIELD_INDEXED : 0;
		uFieldFlags |= ( iType & FLAG_STORED ) ? CSphColumnInfo::FIELD_STORED : 0;

		// a plain string with no flags defaults to a fully functional field
		if ( !uFieldFlags )
			uFieldFlags = CSphColumnInfo::FIELD_INDEXED | CSphColumnInfo::FIELD_STORED;

		AddField ( sName, uFieldFlags );
	}

	return true;
}
// handle the explicit 'id' column in CREATE TABLE; always a bigint attribute
bool DdlParser_c::AddCreateTableId ( const SqlNode_t & tName )
{
	assert( m_pStmt );

	CSphString sName;
	ToString ( sName, tName );
	sName.ToLower();

	// options apply to this column only; reset before any early return
	ItemOptions_t tOpts = m_tItemOptions;
	m_tItemOptions.Reset();

	if ( sName!="id" )
	{
		m_sError.SetSprintf ( "expected 'id', got '%s'", sName.cstr() );
		return false;
	}

	if ( tOpts.m_bHashOptionSet )
	{
		m_sError = "cannot set 'hash' option for 'id'";
		return false;
	}

	CreateTableAttr_t & tAttr = m_pStmt->m_tCreateTable.m_dAttrs.Add();
	tAttr.m_tAttr.m_sName = sName;
	tAttr.m_tAttr.m_eAttrType = SPH_ATTR_BIGINT;
	tOpts.CopyOptionsTo(tAttr);

	return true;
}
// per-column engine='...' option
bool DdlParser_c::AddItemOptionEngine ( const SqlNode_t & tOption )
{
	return ConvertToAttrEngine ( tOption, m_tItemOptions.m_eEngine );
}
bool DdlParser_c::AddItemOptionHash ( const SqlNode_t & tOption )
{
CSphString sValue = ToStringUnescape(tOption);
m_tItemOptions.m_bStringHash = !!strtoull ( sValue.cstr(), NULL, 10 );
m_tItemOptions.m_bHashOptionSet = true;
return true;
}
bool DdlParser_c::AddItemOptionFastFetch ( const SqlNode_t & tOption )
{
CSphString sValue = ToStringUnescape(tOption);
m_tItemOptions.m_bFastFetch = !!strtoull ( sValue.cstr(), NULL, 10 );
return true;
}
bool DdlParser_c::AddItemOptionIndexed ( const SqlNode_t & tOption )
{
CSphString sValue = ToStringUnescape(tOption);
m_tItemOptions.m_bIndexed = !!strtoull ( sValue.cstr(), NULL, 10 );
return true;
}
// knn_type option; 'hnsw' (case-insensitive) is the only supported value
bool DdlParser_c::AddItemOptionKNNType ( const SqlNode_t & tOption )
{
	m_tItemOptions.m_sKNNType = ToStringUnescape(tOption).ToUpper();
	if ( m_tItemOptions.m_sKNNType!="HNSW" )
	{
		// NOTE: the message echoes the upper-cased value, not the user's original spelling
		m_sError.SetSprintf ( "Unknown KNN type '%s'", m_tItemOptions.m_sKNNType.cstr() );
		return false;
	}

	return true;
}
// knn_dims option (vector dimension count)
bool DdlParser_c::AddItemOptionKNNDims ( const SqlNode_t & tOption )
{
	CSphString sValue = ToStringUnescape(tOption);
	// NOTE(review): the strtoull result is narrowed into an int member; huge values wrap
	m_tItemOptions.m_iKNNDims = strtoull ( sValue.cstr(), NULL, 10 );
	m_tItemOptions.m_bKNNDimsSpecified = true;
	return true;
}
// hnsw_similarity option: L2, IP, or COSINE (case-insensitive)
bool DdlParser_c::AddItemOptionHNSWSimilarity ( const SqlNode_t & tOption )
{
	CSphString sValue = ToStringUnescape(tOption).ToUpper();
	if ( sValue=="L2" )
		m_tItemOptions.m_eHNSWSimilarity = knn::HNSWSimilarity_e::L2;
	else if ( sValue=="IP" )
		m_tItemOptions.m_eHNSWSimilarity = knn::HNSWSimilarity_e::IP;
	else if ( sValue=="COSINE" )
		m_tItemOptions.m_eHNSWSimilarity = knn::HNSWSimilarity_e::COSINE;
	else
	{
		m_sError.SetSprintf ( "Unknown HNSW similarity '%s'", sValue.cstr() );
		return false;
	}

	m_tItemOptions.m_bHNSWSimilaritySpecified = true;
	return true;
}
// Handle the per-column HNSW_M=<n> option (graph connectivity parameter).
bool DdlParser_c::AddItemOptionHNSWM ( const SqlNode_t & tOption )
{
	CSphString sValue = ToStringUnescape(tOption);
	m_tItemOptions.m_iHNSWM = strtoull ( sValue.cstr(), nullptr, 10 ); // nullptr over NULL (modern C++ idiom)
	return true;
}
// Handle the per-column HNSW_EF_CONSTRUCTION=<n> option (index build quality/speed knob).
bool DdlParser_c::AddItemOptionHNSWEfConstruction ( const SqlNode_t & tOption )
{
	CSphString sValue = ToStringUnescape(tOption);
	m_tItemOptions.m_iHNSWEFConstruction = strtoull ( sValue.cstr(), nullptr, 10 ); // nullptr over NULL (modern C++ idiom)
	return true;
}
// Convert an engine option token (e.g. 'columnar'/'rowwise') into an AttrEngine_e.
// Matching is case-insensitive; on failure m_sError is set and false is returned.
bool DdlParser_c::ConvertToAttrEngine ( const SqlNode_t & tEngine, AttrEngine_e & eEngine )
{
	// lowercase in place; the original made a redundant intermediate copy
	CSphString sEngineLowerCase = ToStringUnescape(tEngine);
	sEngineLowerCase.ToLower();
	return StrToAttrEngine ( eEngine, AttrEngine_e::DEFAULT, sEngineLowerCase, m_sError );
}
// Record one table-level CREATE TABLE option as a name/value pair.
// Option names are lowercased for case-insensitive lookup later;
// values keep their original case (only unescaped).
void DdlParser_c::AddCreateTableOption ( const SqlNode_t & tName, const SqlNode_t & tValue )
{
	assert(m_pStmt);
	NameValueStr_t & tOpt = m_pStmt->m_tCreateTable.m_dOpts.Add();
	ToString ( tOpt.m_sName, tName );
	tOpt.m_sValue = ToStringUnescape(tValue);
	tOpt.m_sName.ToLower();
}
// Handle 'JOIN CLUSTER ... AT <node>': store the node address as the
// 'at_node' call option and mark the statement as a cluster-nodes update.
void DdlParser_c::JoinClusterAt ( const SqlNode_t & tAt )
{
	assert(m_pStmt);
	m_pStmt->m_bClusterUpdateNodes = true;
	m_pStmt->m_dCallOptNames.Add ( "at_node" );

	SqlInsert_t & tVal = m_pStmt->m_dCallOptValues.Add();
	tVal.m_iType = tAt.m_iType;
	tVal.m_sVal = ToStringUnescape ( tAt );
}
// Append one parsed insert value to dVec, copying the token type, numeric
// payloads, string payload (unescaped, only for quoted strings) and any
// attached sub-value list (e.g. for MVA).
void DdlParser_c::AddInsval ( CSphVector<SqlInsert_t> & dVec, const SqlNode_t & tNode )
{
	SqlInsert_t & tIns = dVec.Add();
	tIns.m_iType = tNode.m_iType;
	tIns.CopyValueInt(tNode);
	tIns.m_fVal = tNode.m_fValue;
	if ( tIns.m_iType==TOK_QUOTED_STRING )
		tIns.m_sVal = ToStringUnescape ( tNode );
	tIns.m_pVals = tNode.m_pValues;
}
//////////////////////////////////////////////////////////////////////////
// Parse a DDL query into statements. Returns false on empty input or any
// parse failure (details in sError). Mutates the query buffer in place:
// appends two NUL bytes as required by flex' yy_scan_buffer.
bool ParseDdl ( Str_t sQuery, CSphVector<SqlStmt_t>& dStmt, CSphString& sError )
{
	if ( !IsFilled ( sQuery ) )
	{
		sError = "query was empty";
		return false;
	}

	auto* sEnd = const_cast<char*> ( end ( sQuery ) );
	sEnd[0] = 0; // prepare for yy_scan_buffer
	sEnd[1] = 0; // this is ok because string allocates a small gap

	return ParseResult_e::PARSE_OK == ParseDdlEx ( sQuery, dStmt, sError );
}
// Run the flex/bison DDL parser over a query buffer that already has the two
// trailing NULs (see ParseDdl). Distinguishes syntax errors from other parse
// failures so callers can fall back to another parser on wrong syntax.
ParseResult_e ParseDdlEx ( Str_t sQuery, CSphVector<SqlStmt_t> & dStmt, CSphString & sError )
{
	assert ( IsFilled ( sQuery ) );
	DdlParser_c tParser { dStmt, sQuery.first, &sError };

	yy3lex_init ( &tParser.m_pScanner );
	// +2 covers the two NUL sentinels appended by the caller
	YY_BUFFER_STATE tLexerBuffer = yy3_scan_buffer ( const_cast<char *>( sQuery.first ), sQuery.second+2, tParser.m_pScanner );
	if ( !tLexerBuffer )
	{
		sError = "internal error: yy3_scan_buffer() failed";
		return ParseResult_e::PARSE_ERROR;
	}

	int iRes = yyparse ( &tParser );

	yy3_delete_buffer ( tLexerBuffer, tParser.m_pScanner );
	yy3lex_destroy ( tParser.m_pScanner );

	dStmt.Pop(); // last query is always dummy

	if ( tParser.IsWrongSyntaxError() )
		return ParseResult_e::PARSE_SYNTAX_ERROR;

	return ( iRes || dStmt.IsEmpty() ) ? ParseResult_e::PARSE_ERROR : ParseResult_e::PARSE_OK;
}
| 15,663
|
C++
|
.cpp
| 426
| 34.553991
| 145
| 0.718018
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,936
|
searchdfields.cpp
|
manticoresoftware_manticoresearch/src/searchdfields.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "sphinxstd.h"
#include "searchdha.h"
// One fetch-stored-fields request: table list, requested field names,
// and the DocIDs to fetch them for.
struct FieldRequest_t
{
	CSphString m_sIndexes;
	CSphFixedVector<CSphString> m_dFieldNames { 0 };
	CSphVector<DocID_t> m_dDocs { 0 };
};
// Locator of one field value inside a flat blob: byte offset + size.
struct FieldLoc_t
{
	int m_iOff;
	int m_iSize;
};
// Accumulated fetch result: all field values packed into one blob,
// addressed via per-field locators; m_sError collects any failure text.
struct FieldBlob_t
{
	CSphString m_sError;
	CSphVector<BYTE> m_dBlob;
	CSphVector<FieldLoc_t> m_dLocs;
};
// DocID -> locator-offset hash; tracks which docs already have their fields
// fetched and where the first locator for each doc lives in FieldBlob_t::m_dLocs.
struct DocHash_t : private OpenHashTable_T<DocID_t, int>
{
	explicit DocHash_t ( int iElems ) : OpenHashTable_T<DocID_t,int> { iElems } {}
	int Count () const { return (int)GetLength(); }
	bool Exists ( DocID_t tId ) const { return ( Find ( tId )!=nullptr ); }
	void Set ( DocID_t tId, int iOff ) { Acquire ( tId ) = iOff;}
	using OpenHashTable_T<DocID_t,int>::Acquire;
	using OpenHashTable_T<DocID_t,int>::Find;
};
// DocID plus the index of its match in the result set, so fetched fields
// can be written back into the right CSphMatch after sorting by DocID.
struct ResLoc_t
{
	DocID_t m_iDocid;
	int m_iIndex;
};
// Per-agent slot for a GETFIELD round-trip: carries the docs to request
// (m_dResDocs, m_iTag) and receives the reply (docs, locators, raw blob).
struct RemoteFieldsAnswer_t : public iQueryResult, public FieldBlob_t
{
	void Reset() final {}
	bool HasWarnings() const final { return false; }

	// request data, not owned
	VecTraits_T<ResLoc_t> m_dResDocs;
	int m_iTag=0;

	// reply data
	CSphVector<DocID_t> m_dDocs;
	const BYTE * m_pFieldsRaw = nullptr; // not owned
};
// Builds a SEARCHD_COMMAND_GETFIELD request for one agent: table list,
// requested field names, then the DocIDs of that agent's chunk
// (taken from the RemoteFieldsAnswer_t pre-attached to the agent).
struct GetFieldRequestBuilder_t : public RequestBuilder_i
{
	const VecTraits_T<const CSphColumnInfo *>& m_dFieldCols; // stored-field columns, not owned

	explicit GetFieldRequestBuilder_t ( const VecTraits_T<const CSphColumnInfo *>& dFieldCols )
		: m_dFieldCols ( dFieldCols )
	{}

	void BuildRequest ( const AgentConn_t & tAgent, ISphOutputBuffer & tOut ) const final
	{
		auto * pRes = (RemoteFieldsAnswer_t *)tAgent.m_pResult.get();
		assert ( pRes );
		auto tHdr = APIHeader ( tOut, SEARCHD_COMMAND_GETFIELD, VER_COMMAND_GETFIELD );
		tOut.SendString ( tAgent.m_tDesc.m_sIndexes.cstr() );
		tOut.SendDword ( m_dFieldCols.GetLength() );
		for ( auto* pFieldCol : m_dFieldCols )
			tOut.SendString ( pFieldCol->m_sName.cstr() );
		tOut.SendDword ( pRes->m_dResDocs.GetLength() );
		for ( auto& iDoc : pRes->m_dResDocs )
			tOut.SendUint64 ( iDoc.m_iDocid );
	}
};
// Parses a GETFIELD reply into the agent's RemoteFieldsAnswer_t:
// DocID list, per-field locators, then the raw blob (zero-copy view
// into the network buffer — valid only while the buffer lives).
struct GetFieldReplyParser_t : public ReplyParser_i
{
	bool ParseReply ( MemInputBuffer_c & tReq, AgentConn_t & tAgent ) const final
	{
		auto * pReply = (RemoteFieldsAnswer_t *)tAgent.m_pResult.get();
		assert ( pReply );
		pReply->m_dDocs.Resize ( tReq.GetDword() );
		for ( auto& tDoc : pReply->m_dDocs )
			tDoc = tReq.GetUint64();

		pReply->m_dLocs.Resize ( tReq.GetDword() );
		for ( FieldLoc_t & tField : pReply->m_dLocs )
		{
			tField.m_iOff = tReq.GetDword();
			tField.m_iSize = tReq.GetDword();
		}
		int iFieldsLen = tReq.GetDword();
		if ( iFieldsLen )
			tReq.GetBytesZerocopy( &pReply->m_pFieldsRaw, iFieldsLen );

		return !tReq.GetError();
	}
};
// Forwards a whole FieldRequest_t to an agent unchanged — used when this
// daemon proxies a GETFIELD request for a distributed table.
struct ProxyFieldRequestBuilder_t : public RequestBuilder_i
{
	const FieldRequest_t & m_tArgs; // not owned

	explicit ProxyFieldRequestBuilder_t ( const FieldRequest_t & tArgs )
		: m_tArgs ( tArgs )
	{}

	void BuildRequest ( const AgentConn_t & tAgent, ISphOutputBuffer & tOut ) const final
	{
		auto tHdr = APIHeader ( tOut, SEARCHD_COMMAND_GETFIELD, VER_COMMAND_GETFIELD );
		tOut.SendString ( tAgent.m_tDesc.m_sIndexes.cstr() );
		tOut.SendDword ( m_tArgs.m_dFieldNames.GetLength() );
		ARRAY_FOREACH ( i, m_tArgs.m_dFieldNames )
			tOut.SendString ( m_tArgs.m_dFieldNames[i].cstr() );
		tOut.SendDword ( m_tArgs.m_dDocs.GetLength() );
		for ( int i = 0, iDocs = m_tArgs.m_dDocs.GetLength (); i<iDocs; ++i )
			tOut.SendUint64 ( m_tArgs.m_dDocs[i] );
	}
};
namespace { // static
// Resolve a comma-separated table list into local table names and remote agent
// connections. Distributed tables contribute both their agents (one AgentConn_t
// per agent, with a fresh RemoteFieldsAnswer_t attached) and their local parts.
// Fails if any name is neither local nor distributed. Local list is deduplicated.
bool GetIndexes ( const CSphString & sIndexes, CSphString & sError, StrVec_t & dLocal, VecRefPtrsAgentConn_t & dRemotes )
{
	StrVec_t dNames;
	ParseIndexList ( sIndexes, dNames );

	for ( const CSphString & sIndex : dNames )
	{
		auto pLocal = GetServed ( sIndex );
		auto pDist = GetDistr ( sIndex );

		if ( !pLocal && !pDist )
		{
			sError.SetSprintf ( "no such table %s", sIndex.cstr() );
			return false;
		}

		if ( pLocal )
		{
			dLocal.Add ( sIndex );
		} else
		{
			for ( const auto& pAgent : pDist->m_dAgents )
			{
				auto * pConn = new AgentConn_t;
				pConn->SetMultiAgent ( pAgent );
				pConn->m_iMyConnectTimeoutMs = pDist->GetAgentConnectTimeoutMs();
				pConn->m_iMyQueryTimeoutMs = pDist->GetAgentQueryTimeoutMs();
				pConn->m_pResult = std::make_unique<RemoteFieldsAnswer_t>();
				dRemotes.Add ( pConn );
			}
			dLocal.Append ( pDist->m_dLocal );
		}
	}

	dLocal.Uniq();
	return true;
}
// Fetch the requested stored fields for all not-yet-fetched docs from one local
// table's docstore, appending the data to tRes and recording each fetched doc in
// hFetchedDocs. Fields missing in this table get zero-length locators so the
// output always holds exactly one locator per requested field per doc.
bool GetFieldFromLocal ( const CSphString & sIndexName, const FieldRequest_t & tArgs, int64_t iSessionID,
	DocHash_t & hFetchedDocs, FieldBlob_t & tRes )
{
	auto pServed = GetServed ( sIndexName );
	if ( !pServed )
	{
		tRes.m_sError.SetSprintf ( "no such table %s", sIndexName.cstr() );
		return false;
	}

	// record current table for crash reporting
	auto& tRefCrashQuery = GlobalCrashQueryGetRef();
	tRefCrashQuery.m_dIndex = { sIndexName.cstr(), sIndexName.Length() };

	RIdx_c pIndex { pServed };
	pIndex->CreateReader ( iSessionID );

	// collect fieldids and remap as:
	// for fieldnames: [fake four third five]
	// fieldids is: [3,4,5]
	// remap is: [-1,1,0,2] -> as field[-1] absent, field[1] is four, field[0] is third, field[2] is five
	CSphFixedVector<int> dFieldRemap ( tArgs.m_dFieldNames.GetLength() );
	CSphVector<int> dFieldIds;
	{
		using fld_t = std::pair<int,int>; // (docstore field id, position in the request)
		CSphVector<fld_t> dFields;
		ARRAY_CONSTFOREACH ( i, tArgs.m_dFieldNames )
		{
			int iField = pIndex->GetFieldId ( tArgs.m_dFieldNames[i], DOCSTORE_TEXT );
			if ( iField==-1 )
				dFieldRemap[i] = -1;
			else
				dFields.Add ( { iField, i } );
		}
		// docstore wants field ids in ascending order
		dFields.Sort ( Lesser ( [] ( fld_t & l, fld_t & r ) { return l.first<r.first; } ) );
		dFieldIds.Resize ( dFields.GetLength () );
		ARRAY_CONSTFOREACH ( i, dFields )
		{
			dFieldIds[i] = dFields[i].first;
			dFieldRemap[dFields[i].second] = i;
		}
	}

	DocstoreDoc_t tDoc;
	for ( DocID_t tDocid : tArgs.m_dDocs )
	{
		// skip docs already satisfied by an earlier table
		if ( hFetchedDocs.Exists ( tDocid ) )
			continue;

		// doc not in this table — maybe a later one has it
		if ( !pIndex->GetDoc ( tDoc, tDocid, &dFieldIds, iSessionID, false ) )
			continue;

		assert ( tDoc.m_dFields.GetLength()==dFieldIds.GetLength() );
		hFetchedDocs.Set ( tDocid, tRes.m_dLocs.GetLength() );

		// emit locators in the request's field order via the remap
		for ( int iField : dFieldRemap )
		{
			if ( iField==-1 )
			{
				tRes.m_dLocs.Add ( { tRes.m_dBlob.GetLength (), 0 } );
				continue;
			}

			assert ( iField<tDoc.m_dFields.GetLength() );
			const CSphVector<BYTE> & tFieldData = tDoc.m_dFields[iField];
			tRes.m_dLocs.Add ( { tRes.m_dBlob.GetLength (), (int) tFieldData.GetLengthBytes () } );
			tRes.m_dBlob.Append ( tFieldData );
		}
	}

	return true;
}
// Merge replies from remote agents into tRes, skipping docs already fetched
// locally or by a previous agent. Each reply carries iFieldsCount locators
// per doc, sliced out by doc index.
// NOTE(review): a failing agent overwrites m_sError set by a previous failing
// agent — only the last failure survives; confirm that is intentional.
bool GetFieldFromDist ( VecRefPtrsAgentConn_t & dRemotes, const FieldRequest_t & tArgs, DocHash_t & hFetchedDocs, FieldBlob_t & tRes )
{
	const int iFieldsCount = tArgs.m_dFieldNames.GetLength();
	for ( AgentConn_t * pAgent : dRemotes )
	{
		auto * pReply = (RemoteFieldsAnswer_t *) pAgent->m_pResult.get ();
		if ( !pAgent->m_bSuccess )
		{
			if ( !pAgent->m_sFailure.IsEmpty() )
				tRes.m_sError.SetSprintf ( "agent %s: %s", pAgent->m_tDesc.GetMyUrl().cstr(), pAgent->m_sFailure.cstr() );
			continue;
		}

		ARRAY_FOREACH ( iDoc, pReply->m_dDocs )
		{
			DocID_t tDocid = pReply->m_dDocs[iDoc];
			// already got fields for the document
			if ( hFetchedDocs.Exists(tDocid) )
				continue;

			hFetchedDocs.Set ( tDocid, tRes.m_dLocs.GetLength () );
			// copy this doc's locators and data into the combined blob
			auto dLocs = pReply->m_dLocs.Slice ( iDoc * iFieldsCount, iFieldsCount );
			for ( const auto& tSrcField : dLocs )
			{
				tRes.m_dLocs.Add ( { tRes.m_dBlob.GetLength (), tSrcField.m_iSize } );
				tRes.m_dBlob.Append ( pReply->m_pFieldsRaw+tSrcField.m_iOff, tSrcField.m_iSize );
			}
		}
	}
	return true;
}
// Fetch stored fields for a request across local tables and remote agents.
// Remote jobs are scheduled first so they run while local docstores are read;
// local reads stop early once every requested doc has been satisfied.
// Returns false if either the local or the remote leg failed.
bool GetFields ( const FieldRequest_t & tReq, FieldBlob_t & tRes, DocHash_t & hFetchedDocs )
{
	if ( tReq.m_dDocs.IsEmpty() )
		return true;

	StrVec_t dLocals;
	VecRefPtrsAgentConn_t dRemotes;
	if ( !GetIndexes ( tReq.m_sIndexes, tRes.m_sError, dLocals, dRemotes ) )
		return false;

	if ( dLocals.IsEmpty() && dRemotes.IsEmpty() )
		return true;

	bool bOkLocal = true;
	bool bOkRemote = true;

	// kick off remote requests asynchronously
	CSphRefcountedPtr<RemoteAgentsObserver_i> pDistReporter { nullptr };
	std::unique_ptr<RequestBuilder_i> pDistReq;
	std::unique_ptr<ReplyParser_i> pDistReply;
	if ( !dRemotes.IsEmpty () )
	{
		pDistReq = std::make_unique<ProxyFieldRequestBuilder_t> ( tReq );
		pDistReply = std::make_unique<GetFieldReplyParser_t>();

		pDistReporter = GetObserver();
		ScheduleDistrJobs ( dRemotes, pDistReq.get(), pDistReply.get(), pDistReporter.Ptr() );
	}

	// read local docstores while remotes are in flight
	{
		DocstoreSession_c tSession;

		for ( const auto & sLocal : dLocals )
		{
			if ( !GetFieldFromLocal ( sLocal, tReq, tSession.GetUID(), hFetchedDocs, tRes ) )
			{
				bOkLocal = false;
				break;
			}

			// early out on fields fetched for all docs
			if ( hFetchedDocs.Count ()==tReq.m_dDocs.GetLength() )
				break;
		}
	}

	// wait for remotes and merge their replies
	if ( !dRemotes.IsEmpty() )
	{
		pDistReporter->Finish();
		bOkRemote = GetFieldFromDist ( dRemotes, tReq, hFetchedDocs, tRes );
	}

	return ( bOkLocal && bOkRemote );
}
// Decode an incoming GETFIELD request: table list, field-name list, DocID list.
// Mirrors the wire format written by GetFieldRequestBuilder_t/ProxyFieldRequestBuilder_t.
FieldRequest_t ParseAPICommandGetfield ( InputBuffer_c & tReq )
{
	FieldRequest_t tParsed;
	tParsed.m_sIndexes = tReq.GetString();

	int iFields = tReq.GetDword();
	tParsed.m_dFieldNames.Reset ( iFields );
	for ( int i = 0; i<iFields; ++i )
		tParsed.m_dFieldNames[i] = tReq.GetString();

	int iDocs = tReq.GetDword();
	tParsed.m_dDocs.Resize ( iDocs );
	for ( int i = 0; i<iDocs; ++i )
		tParsed.m_dDocs[i] = tReq.GetUint64();

	return tParsed;
}
// Serialize a GETFIELD reply: the list of docs we actually found (absent docs
// are dropped from tRequest.m_dDocs on the fly), one (offset,size) locator per
// doc per field, and finally the packed blob with the field data itself.
void SendAPICommandGetfieldAnswer ( ISphOutputBuffer & tOut, FieldRequest_t& tRequest,
	const FieldBlob_t& tRes, const DocHash_t& _tFetched )
{
	auto tReply = APIAnswer ( tOut, VER_COMMAND_GETFIELD );
	auto & tFetched = const_cast<DocHash_t &>(_tFetched); // non-const need for Acquire()
	auto iDocsCount = tFetched.Count();

	if ( !iDocsCount )
	{
		// nothing found: three empty arrays keep the reply well-formed
		tOut.SendDword ( 0 ); // docs array
		tOut.SendDword ( 0 ); // locators array
		tOut.SendDword ( 0 ); // fields blob array
		return;
	}

	auto& dDocs = tRequest.m_dDocs;

	// send doclist and simultaneously wipe out absend docs
	// note that because of wiping doc's order will be broken!
	tOut.SendDword ( iDocsCount );
	ARRAY_FOREACH ( i, dDocs )
	{
		if ( tFetched.Exists ( dDocs[i] ) )
			tOut.SendUint64 ( dDocs[i] );
		else
			dDocs.RemoveFast(i--); // i-- revisits the element swapped into slot i
	}

	assert ( iDocsCount==dDocs.GetLength () );
	auto iFields = tRequest.m_dFieldNames.GetLength ();

	// locators: iFields consecutive entries per doc, in doclist order
	tOut.SendDword ( iDocsCount * iFields );
	for ( DocID_t tDoc : dDocs )
	{
		int iOff = tFetched.Acquire ( tDoc ); // offset of the doc's first locator
		for ( int i=0; i<iFields; ++i )
		{
			tOut.SendDword ( tRes.m_dLocs[iOff+i].m_iOff );
			tOut.SendDword ( tRes.m_dLocs[iOff+i].m_iSize );
		}
	}

	tOut.SendArray ( tRes.m_dBlob );
}
int GetDocIDOffset ( const AggrResult_t& tRes )
{
const CSphColumnInfo* pId = tRes.m_tSchema.GetAttr ( sphGetDocidName() );
if ( pId )
return pId->m_tLocator.m_iBitOffset >> ROWITEM_SHIFT;
return 0;
}
// fill vec of ResLoc_t with remote matches from tRes, sorted by DocID
// Returns the single result set's tag, or -1 if the set is not remote
// (nothing to fetch). Sorting is skipped when DocIDs arrive already ordered.
int CollectUntaggedDocs ( CSphVector<ResLoc_t>& dIds, const AggrResult_t & tRes, const VecTraits_T<CSphMatch> & dMatches )
{
	assert ( !tRes.m_bTagsAssigned );
	assert ( tRes.m_bSingle );

	if ( !tRes.m_dResults.First ().m_bTag ) // process only remote resultsets
		return -1;

	DocID_t iLastDocID = DOCID_MIN;
	bool bNeedSort = false;
	auto iIdOffset = GetDocIDOffset ( tRes );
	ARRAY_CONSTFOREACH( i, dMatches )
	{
		const CSphMatch & tMatch = dMatches[i];
		ResLoc_t & tDoc = dIds.Add ();
		tDoc.m_iDocid = sphGetDocID ( tMatch.m_pDynamic + iIdOffset );
		tDoc.m_iIndex = i;
		if ( bNeedSort )
			continue;
		// detect the first out-of-order DocID; then the vector needs a final sort
		bNeedSort = tDoc.m_iDocid<iLastDocID;
		iLastDocID = tDoc.m_iDocid;
	}
	if ( bNeedSort )
		dIds.Sort ( Lesser ( [] ( const ResLoc_t & a, const ResLoc_t & b ) { return a.m_iDocid<b.m_iDocid; } ) );
	return tRes.m_dResults.First ().m_iTag;
}
// Tag of the match that a ResLoc_t refers to (i.e. which result set it came from).
int TagOf ( const ResLoc_t& tLoc, const VecTraits_T<CSphMatch> & dMatches )
{
	assert ( tLoc.m_iIndex>=0 && tLoc.m_iIndex<dMatches.GetLength () );
	return dMatches[tLoc.m_iIndex].m_iTag;
}
// fill vec of ResLoc_t with remote matches from tRes, sorted by tags, inside groups sorted by DocID
void CollectTaggedDocs ( CSphVector<ResLoc_t> & dIds, const AggrResult_t & tRes, const VecTraits_T<CSphMatch> & dMatches )
{
	assert ( tRes.m_bTagsAssigned );
	assert ( tRes.m_bSingle );

	auto iIdOffset = GetDocIDOffset ( tRes );
	ARRAY_CONSTFOREACH( i, dMatches )
	{
		const CSphMatch & tMatch = dMatches[i];
		if ( !tRes.m_dResults[tMatch.m_iTag].m_bTag ) // process only matches came from remotes
			continue;

		ResLoc_t & tDoc = dIds.Add ();
		tDoc.m_iDocid = sphGetDocID ( tMatch.m_pDynamic + iIdOffset );
		tDoc.m_iIndex = i;
	}

	// primary key: tag; secondary: DocID within each tag group
	dIds.Sort ( Lesser ( [&dMatches] ( const ResLoc_t & a, const ResLoc_t & b )
	{
		auto iTagA = TagOf ( a, dMatches );
		auto iTagB = TagOf ( b, dMatches );
		if ( iTagA==iTagB )
			return a.m_iDocid<b.m_iDocid;
		return iTagA<iTagB;
	}));
}
// Append one (docs slice, tag) range to dRanges; empty slices are skipped.
void AddRange ( CSphVector<RemoteFieldsAnswer_t> & dRanges, const VecTraits_T<ResLoc_t> & dIds, int iTag )
{
	if ( dIds.IsEmpty() )
		return;

	auto & dRange = dRanges.Add();
	dRange.m_dResDocs = dIds; // view into dIds, not a copy — dIds must outlive dRanges
	dRange.m_iTag = iTag;
}
// fill dRanges with chunks of dIds, grouped by same tag
// (dIds must already be sorted by tag, see CollectTaggedDocs)
void CollectTaggedRanges ( CSphVector<RemoteFieldsAnswer_t> & dRanges, const VecTraits_T<ResLoc_t> & dIds, const VecTraits_T<CSphMatch> & dMatches )
{
	int iStart = 0;
	if ( dIds.IsEmpty () )
		return;

	auto iStartTag = TagOf ( dIds[iStart], dMatches );
	ARRAY_CONSTFOREACH ( i, dIds )
	{
		// tag changed: close the current chunk and start a new one
		if ( TagOf ( dIds[i], dMatches )!=iStartTag )
		{
			AddRange ( dRanges, dIds.Slice ( iStart, i-iStart ), iStartTag );
			iStart = i;
			iStartTag = TagOf ( dIds[iStart], dMatches );
		}
	}
	// flush the trailing chunk
	AddRange ( dRanges, dIds.Slice ( iStart, dIds.GetLength()-iStart ), iStartTag );
}
// fill dIds with remote matches and return ranges of them, grouped by tag. First match of every chunk has valid tag
CSphVector<RemoteFieldsAnswer_t> ExtractRanges ( CSphVector<ResLoc_t>& dIds, const AggrResult_t & tRes, const VecTraits_T<CSphMatch>& dMatches )
{
	CSphVector<RemoteFieldsAnswer_t> dRanges;
	if ( tRes.m_bTagsAssigned )
	{
		// mixed sources: group docs by their originating result set
		CollectTaggedDocs ( dIds, tRes, dMatches );
		CollectTaggedRanges ( dRanges, dIds, dMatches );
	}
	else
	{
		int iOnlyTag = CollectUntaggedDocs( dIds, tRes, dMatches );
		AddRange ( dRanges, dIds, iOnlyTag ); // only one range
	}
	return dRanges;
}
// create agents
// Builds one AgentConn_t per range, cloned from the agent that produced the
// corresponding result set, and attaches the range as the agent's result slot.
VecRefPtrsAgentConn_t GetAgents( const VecTraits_T<RemoteFieldsAnswer_t>& dRangesIds, AggrResult_t & tRes )
{
	assert ( tRes.m_bIdxByTag );
	VecRefPtrsAgentConn_t dAgents;
	dAgents.Reserve ( dRangesIds.GetLength () );
	for ( auto & dRange : dRangesIds )
	{
		const AgentConn_t * pDesc = tRes.m_dResults[dRange.m_iTag].Agent ();
		assert ( pDesc );
		auto * pAgent = new AgentConn_t;
		pAgent->m_tDesc.CloneFrom ( pDesc->m_tDesc );
		pAgent->m_iMyConnectTimeoutMs = pDesc->m_iMyConnectTimeoutMs;
		pAgent->m_iMyQueryTimeoutMs = pDesc->m_iMyQueryTimeoutMs;
		pAgent->m_pResult.reset ( &dRange ); // fixme! that is hack with reset/release, not very good fit to unique_ptr
		dAgents.Add ( pAgent );
	}
	return dAgents;
}
// Write the field blobs from one agent's reply into the corresponding matches.
// Requested docs (m_dResDocs) are sorted by DocID; received docs may be fewer
// and unordered, so they are walked through a sorted index permutation and
// merged against the request list. Each field is packed and swapped into the
// match's attribute, deallocating whatever was there before.
void FillDocs ( VecTraits_T<CSphMatch> & dMatches, RemoteFieldsAnswer_t& dReply,
	const VecTraits_T<const CSphColumnInfo *>& dFieldCols )
{
	auto & dResDocs = dReply.m_dResDocs; // directly passed from source, sorted by DocID
	auto & dDocs = dReply.m_dDocs; // received, m.b. different (shrinked and unordered)
	int iStride = dFieldCols.GetLength ();

	if ( dDocs.IsEmpty () )
		return;

	// docs and locators placed in unknown order, but for merging we need them ordered.
	CSphFixedVector<int> dOrd { dDocs.GetLength () };
	ARRAY_CONSTFOREACH( i, dOrd )
		dOrd[i] = i;
	dOrd.Sort ( Lesser ( [&dDocs] ( int l, int r ) { return dDocs[l]<dDocs[r]; } ) );

	int iDocs = dDocs.GetLength();
	int iDoc = 0;
	int iRecvDoc = dOrd[iDoc];

	// merge-join: both sequences are now ordered by DocID
	for ( auto & dResDoc : dResDocs )
	{
		if ( dResDoc.m_iDocid<dDocs[iRecvDoc] )
			continue; // requested doc missing from the reply

		assert ( dResDoc.m_iDocid==dDocs[iRecvDoc] );
		// found matched docs
		CSphMatch & tMatch = dMatches[dResDoc.m_iIndex];
		auto dLocators = dReply.m_dLocs.Slice ( iStride * iDoc, iStride );

		ARRAY_CONSTFOREACH( i, dFieldCols )
		{
			BYTE * pPacked = sphPackPtrAttr ( { dReply.m_pFieldsRaw+dLocators[i].m_iOff, dLocators[i].m_iSize } );
			// swap in the new value, free the old one
			pPacked = (BYTE*)ExchangeAttr ( tMatch, dFieldCols[i]->m_tLocator, (SphAttr_t)pPacked );
			sphDeallocatePacked ( pPacked );
		}

		++iDoc;
		if ( iDoc>=iDocs )
			break;
		iRecvDoc = dOrd[iDoc];
	}
}
// Collect pointers to the schema's stored-field columns (non-"real" attributes).
CSphVector<const CSphColumnInfo *> GetStoredColumnList ( const CSphSchema & tSchema )
{
	CSphVector<const CSphColumnInfo *> dStored;
	const int iAttrs = tSchema.GetAttrsCount();
	for ( int iAttr = 0; iAttr<iAttrs; ++iAttr )
	{
		const CSphColumnInfo & tAttr = tSchema.GetAttr ( iAttr );
		if ( IsNotRealAttribute ( tAttr ) )
			dStored.Add ( &tAttr );
	}
	return dStored;
}
} // static namespace
// Entry point for the SEARCHD_COMMAND_GETFIELD API command: parse the request,
// fetch the stored fields (local and/or proxied), and serialize the answer.
// Errors are reported back to the client via SendErrorReply.
void HandleCommandGetField ( ISphOutputBuffer & tOut, WORD uVer, InputBuffer_c & tReq )
{
	if ( !CheckCommandVersion ( uVer, VER_COMMAND_GETFIELD, tOut ) )
		return;

	// parse request
	auto tRequest = ParseAPICommandGetfield ( tReq );
	if ( tReq.GetError() )
	{
		SendErrorReply ( tOut, "invalid or truncated request" );
		return;
	}

	// fetch stored fields
	DocHash_t tFetched ( tRequest.m_dDocs.GetLength() );
	FieldBlob_t tRes;
	if ( !GetFields ( tRequest, tRes, tFetched ) )
	{
		SendErrorReply ( tOut, "%s", tRes.m_sError.cstr() );
		return;
	}

	SendAPICommandGetfieldAnswer ( tOut, tRequest, tRes, tFetched );
}
// Fetch stored fields for matches that came from remote agents and inject them
// into the final result set. Only the visible window (offset..offset+limit) is
// processed. Agent failures become warnings, not errors.
void RemotesGetField ( AggrResult_t & tRes, const CSphQuery & tQuery )
{
	// removed duplicated asserts (m_bSingle / m_bOneSchema were asserted twice)
	assert ( tRes.m_bSingle );
	assert ( tRes.m_bOneSchema );
	assert ( tRes.m_bTagsCompacted );
	assert ( tRes.m_bIdxByTag );

	// TODO!!! try to start fetch prior to sorting
	auto dFieldCols = GetStoredColumnList ( tRes.m_tSchema );

	// early reject in case no stored fields found
	if ( dFieldCols.IsEmpty() )
		return;

	// only the window the client will actually see
	int iOffset = Max ( tQuery.m_iOffset, tQuery.m_iOuterOffset );
	int iCount = ( tQuery.m_iOuterLimit ? tQuery.m_iOuterLimit : tQuery.m_iLimit );
	auto dMatches = tRes.m_dResults.First ().m_dMatches.Slice ( iOffset, iCount );

	// extract ranges and early return in case no remote matches
	CSphVector<ResLoc_t> dIds;
	auto dRangesIds = ExtractRanges ( dIds, tRes, dMatches );
	if ( dRangesIds.IsEmpty() )
		return;

	auto dAgents = GetAgents ( dRangesIds, tRes );
	assert ( dAgents.GetLength ()==dRangesIds.GetLength () );

	// connect to remote agents and query them
	GetFieldRequestBuilder_t tBuilder ( dFieldCols );
	GetFieldReplyParser_t tParser;
	PerformRemoteTasks ( dAgents, &tBuilder, &tParser );

	// collect agent failures into the result's warning string
	StringBuilder_c sError { "," };
	if ( !tRes.m_sWarning.IsEmpty () )
		sError << tRes.m_sWarning;
	for ( auto* pAgent : dAgents )
	{
		// release() undoes the reset() hack in GetAgents: the range is not owned by the agent
		auto & dReply = *(RemoteFieldsAnswer_t *) pAgent->m_pResult.release ();
		if ( pAgent->m_bSuccess )
			FillDocs ( dMatches, dReply, dFieldCols );
		else if ( !pAgent->m_sFailure.IsEmpty() )
			sError.Sprintf ( "agent %s: %s", pAgent->m_tDesc.GetMyUrl().cstr(), pAgent->m_sFailure.cstr() );
	}
	sError.MoveTo ( tRes.m_sWarning );
}
| 19,223
|
C++
|
.cpp
| 562
| 31.626335
| 148
| 0.702666
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,937
|
jieba.cpp
|
manticoresoftware_manticoresearch/src/jieba.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
// this file is should build ONLY when 'WITH_JIEBA" defined
#include "jieba.h"
STATIC_ASSERT ( WITH_JIEBA, SHOULD_NOT_BUILD_WIHTOUT_WITH_JIEBA_DEFINITION );
#include "cjkpreprocessor.h"
#include "Jieba.hpp"
// CJK preprocessor backed by cppjieba: segments Chinese text into words before
// tokenization. The underlying Jieba engine (dictionaries) is shared between
// clones via shared_ptr, so only the first instance pays the load cost.
class JiebaPreprocessor_c : public CJKPreprocessor_c
{
public:
			JiebaPreprocessor_c ( JiebaMode_e eMode, bool bHMM, const CSphString & sJiebaUserDictPath );

	bool	Init ( CSphString & sError ) override;
	// clone shares m_pJieba; per-field mode override (if any) wins over the stored mode
	CJKPreprocessor_c * Clone ( const FieldFilterOptions_t * pOptions ) override { return new JiebaPreprocessor_c ( pOptions && pOptions->m_eJiebaMode!=JiebaMode_e::NONE ? pOptions->m_eJiebaMode : m_eMode, m_bHMM, m_sJiebaUserDictPath, m_pJieba ); }

protected:
	void	ProcessBuffer ( const BYTE * pBuffer, int iLength ) override;
	const BYTE *	GetNextToken ( int & iTokenLen ) override;

private:
	std::shared_ptr<cppjieba::Jieba> m_pJieba;		// shared segmentation engine (dictionaries loaded once)
	std::vector<cppjieba::Word>	m_dWords;			// words of the currently processed buffer
	cppjieba::CutContext m_tCtx;
	int				m_iToken = 0;					// cursor into m_dWords for GetNextToken()
	JiebaMode_e		m_eMode;
	bool			m_bHMM = true;
	CSphString		m_sJiebaUserDictPath;

	// private ctor used by Clone() to reuse an already-initialized engine
	JiebaPreprocessor_c ( JiebaMode_e eMode, bool bHMM, const CSphString & sJiebaUserDictPath, std::shared_ptr<cppjieba::Jieba> pJieba );
};
// Public ctor: stores settings only; the engine is loaded later in Init().
JiebaPreprocessor_c::JiebaPreprocessor_c ( JiebaMode_e eMode, bool bHMM, const CSphString & sJiebaUserDictPath )
	: m_eMode ( eMode )
	, m_bHMM ( bHMM )
	, m_sJiebaUserDictPath ( sJiebaUserDictPath )
{}
// Clone ctor: reuses an already-loaded engine, so Init() becomes a no-op.
JiebaPreprocessor_c::JiebaPreprocessor_c ( JiebaMode_e eMode, bool bHMM, const CSphString & sJiebaUserDictPath, std::shared_ptr<cppjieba::Jieba> pJieba )
	: m_pJieba ( pJieba )
	, m_eMode ( eMode )
	, m_bHMM ( bHMM )
	, m_sJiebaUserDictPath ( sJiebaUserDictPath )
{}
// Load the Jieba dictionaries from the configured data dir (user dictionary may
// be overridden). Verifies every file is readable before handing them to
// cppjieba, since cppjieba abort()s on load failure instead of reporting it.
bool JiebaPreprocessor_c::Init ( CSphString & sError )
{
	// skip init if reusing existing jieba
	if ( m_pJieba )
		return true;

	CSphString sJiebaPath = GetJiebaDataDir();

	// indexes into dJiebaFiles below; must stay in sync with the array
	enum class JiebaFiles_e : int
	{
		DICT = 0,
		HMM,
		USER_DICT,
		IDF,
		STOP_WORD,

		TOTAL
	};

	CSphString dJiebaFiles[] =
	{
		"jieba.dict.utf8",
		"hmm_model.utf8",
		"user.dict.utf8",
		"idf.utf8",
		"stop_words.utf8"
	};

	// prefix all files with the data dir
	for ( auto & i : dJiebaFiles )
		i.SetSprintf ( "%s/%s", sJiebaPath.cstr(), i.cstr() );

	// per-table user dictionary overrides the default one
	if ( !m_sJiebaUserDictPath.IsEmpty() )
		dJiebaFiles[(int)JiebaFiles_e::USER_DICT] = m_sJiebaUserDictPath;

	for ( auto & i : dJiebaFiles )
		if ( !sphIsReadable ( i.cstr() ) )
		{
			sError.SetSprintf ( "Error initializing Jieba: unable to read '%s'", i.cstr() );
			return false;
		}

	// fixme! jieba responds to load errors with abort() call
	m_pJieba = std::make_shared<cppjieba::Jieba> ( dJiebaFiles[(int)JiebaFiles_e::DICT].cstr(), dJiebaFiles[(int)JiebaFiles_e::HMM].cstr(), dJiebaFiles[(int)JiebaFiles_e::USER_DICT].cstr(), dJiebaFiles[(int)JiebaFiles_e::IDF].cstr(), dJiebaFiles[(int)JiebaFiles_e::STOP_WORD].cstr() );

	return true;
}
// Segment one text buffer into m_dWords using the configured cut mode and
// reset the token cursor. resize(0) keeps the vector's capacity across calls.
void JiebaPreprocessor_c::ProcessBuffer ( const BYTE * pBuffer, int iLength )
{
	m_dWords.resize(0);

	switch ( m_eMode )
	{
	case JiebaMode_e::ACCURATE:
		m_pJieba->Cut ( { (const char*)pBuffer, (size_t)iLength }, m_dWords, m_tCtx, m_bHMM );
		break;

	case JiebaMode_e::FULL:
		m_pJieba->CutAll ( { (const char*)pBuffer, (size_t)iLength }, m_dWords, m_tCtx );
		break;

	case JiebaMode_e::SEARCH:
		m_pJieba->CutForSearch ( { (const char*)pBuffer, (size_t)iLength }, m_dWords, m_tCtx, m_bHMM );
		break;

	default:
		// JiebaMode_e::NONE and anything unexpected: leave m_dWords empty
		break;
	}

	m_iToken = 0;
}
// Return the next segmented word (and its byte length) from the buffer
// processed by ProcessBuffer, or nullptr once all words are consumed.
// The returned pointer stays valid until the next ProcessBuffer call.
const BYTE * JiebaPreprocessor_c::GetNextToken ( int & iTokenLen )
{
	if ( (size_t)m_iToken>=m_dWords.size() )
		return nullptr;

	const auto & tWord = m_dWords[m_iToken++];
	iTokenLen = tWord.word.length();
	return (const BYTE*)tWord.word.c_str();
}
//////////////////////////////////////////////////////////////////////////
// Validate Jieba-related index settings. Currently there is nothing to check,
// so it always succeeds; both parameters are intentionally unused.
bool CheckConfigJieba ( CSphIndexSettings & tSettings, CSphString & sError )
{
	return true;
}
// Parse a jieba_mode config value ('accurate'/'full'/'search') into the enum.
// Unknown values set sError and return false.
bool StrToJiebaMode ( JiebaMode_e & eMode, const CSphString & sValue, CSphString & sError )
{
	struct { const char * m_szName; JiebaMode_e m_eMode; } dKnown[] =
	{
		{ "accurate",	JiebaMode_e::ACCURATE },
		{ "full",		JiebaMode_e::FULL },
		{ "search",		JiebaMode_e::SEARCH }
	};

	for ( const auto & tEntry : dKnown )
		if ( sValue==tEntry.m_szName )
		{
			eMode = tEntry.m_eMode;
			return true;
		}

	sError.SetSprintf ( "unknown jieba mode: %s", sValue.cstr() );
	return false;
}
// Create and chain a Jieba field filter for the table, if its settings request
// the Jieba preprocessor. Resolves the user-dictionary path through the
// filename builder (if any) so relative paths work. On failure sError gets
// a table-prefixed message and pFieldFilter is left unchanged.
bool SpawnFilterJieba ( std::unique_ptr<ISphFieldFilter> & pFieldFilter, const CSphIndexSettings & tSettings, const CSphTokenizerSettings & tTokSettings, const char * szIndex, FilenameBuilder_i * pFilenameBuilder, CSphString & sError )
{
	if ( tSettings.m_ePreprocessor!=Preprocessor_e::JIEBA )
		return true;

	CSphString sJiebaUserDictPath = tSettings.m_sJiebaUserDictPath;
	if ( !sJiebaUserDictPath.IsEmpty() && pFilenameBuilder )
		sJiebaUserDictPath = pFilenameBuilder->GetFullPath(sJiebaUserDictPath);

	// renamed from pFilterICU — copy-paste leftover from the ICU variant of this function
	auto pFilterJieba = CreateFilterCJK ( std::move ( pFieldFilter ), std::make_unique<JiebaPreprocessor_c> ( tSettings.m_eJiebaMode, tSettings.m_bJiebaHMM, sJiebaUserDictPath ), tTokSettings.m_sBlendChars.cstr(), sError );
	if ( !sError.IsEmpty() )
	{
		sError.SetSprintf ( "table '%s': Error initializing Jieba: %s", szIndex, sError.cstr() );
		return false;
	}

	pFieldFilter = std::move ( pFilterJieba );
	return true;
}
| 5,579
|
C++
|
.cpp
| 146
| 35.869863
| 282
| 0.716116
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,938
|
snippetindex.cpp
|
manticoresoftware_manticoresearch/src/snippetindex.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "snippetindex.h"
#include "sphinxint.h"
// Comparator for SnippetsDocIndex_c::Keyword_t entries whose text lives in a
// shared buffer (m_pBase + m_iWord). Orders by keyword text, then query position;
// IsSameTerm compares text only, ignoring position.
struct KeywordCmp_t
{
	const char * m_pBase; // start of the shared keyword text buffer, not owned

	explicit KeywordCmp_t ( const char * pBase )
		: m_pBase ( pBase )
	{}

	inline int Cmp ( const SnippetsDocIndex_c::Keyword_t & a, const SnippetsDocIndex_c::Keyword_t & b ) const
	{
		assert ( m_pBase );
		// bounded compare: 3*SPH_MAX_WORD_LEN+4 caps the longest stored keyword
		return strncmp ( m_pBase + a.m_iWord, m_pBase + b.m_iWord, 3*SPH_MAX_WORD_LEN+4 );
	}

	inline bool IsLess ( const SnippetsDocIndex_c::Keyword_t & a, const SnippetsDocIndex_c::Keyword_t & b ) const
	{
		int iCmp = Cmp ( a, b );
		if ( iCmp==0 )
			return a.m_iQueryPos<b.m_iQueryPos;
		else
			return iCmp<0;
	}

	inline bool IsSameTerm ( const SnippetsDocIndex_c::Keyword_t & a, const SnippetsDocIndex_c::Keyword_t & b ) const
	{
		return Cmp ( a, b )==0;
	}
};
// Comparator for SnippetsDocIndex_c::Term_t: orders by word id, then query
// position; IsSameTerm compares word id only.
struct TermCmp_t
{
	inline bool IsLess ( const SnippetsDocIndex_c::Term_t & a, const SnippetsDocIndex_c::Term_t & b ) const
	{
		if ( a.m_iWordId==b.m_iWordId )
			return a.m_iQueryPos<b.m_iQueryPos;
		else
			return a.m_iWordId<b.m_iWordId;
	}

	bool IsSameTerm ( const SnippetsDocIndex_c::Term_t & a, const SnippetsDocIndex_c::Term_t & b ) const
	{
		return ( a.m_iWordId==b.m_iWordId );
	}
};
//////////////////////////////////////////////////////////////////////////
// Bind the index to the parsed query it will collect hits for.
SnippetsDocIndex_c::SnippetsDocIndex_c ( const XQQuery_t & tQuery )
	: m_uLastPos ( 0 )
	, m_tQuery ( tQuery )
{}
// Allocate one hit list per query position (plain terms + wildcard terms)
// and reset the position tracker before collecting a new document's hits.
void SnippetsDocIndex_c::SetupHits ()
{
	m_dDocHits.Resize ( m_dTerms.GetLength() + m_dStars.GetLength() );
	m_uLastPos = 0;
}
// Check whether a document word matches a wildcard query keyword.
// For UTF-8 patterns, a pre-decoded wide-char pattern is passed along to let
// sphWildcardMatch work per-codepoint rather than per-byte.
bool SnippetsDocIndex_c::MatchStar ( const Keyword_t & tTok, const BYTE * sWord ) const
{
	assert ( tTok.m_bStar );
	const BYTE * sKeyword = m_dStarBuffer.Begin() + tTok.m_iWord;
	const char * sWildcard = (const char*) sKeyword;
	int dWildcard [ SPH_MAX_WORD_LEN + 1 ];
	// nullptr over NULL (modern C++ idiom)
	int * pWildcard = ( sphIsUTF8 ( sWildcard ) && sphUTF8ToWideChar ( sWildcard, dWildcard, SPH_MAX_WORD_LEN ) ) ? dWildcard : nullptr;
	return sphWildcardMatch ( (const char*)sWord, (const char*)sKeyword, pWildcard );
}
// Find the query position of a document word: first by exact word id among
// plain terms, then by wildcard match among star terms. Returns -1 if the
// word is not part of the query.
int SnippetsDocIndex_c::FindWord ( SphWordID_t iWordID, const BYTE * sWord, int iWordLen ) const
{
	// nullptr over NULL (modern C++ idiom)
	const Term_t * pQueryTerm = iWordID ? m_dTerms.BinarySearch ( bind ( &Term_t::m_iWordId ), iWordID ) : nullptr;
	if ( pQueryTerm )
		return pQueryTerm->m_iQueryPos;

	if ( sWord && iWordLen )
		ARRAY_FOREACH ( i, m_dStars )
			if ( MatchStar ( m_dStars[i], sWord ) )
				return m_dStars[i].m_iQueryPos;

	return -1;
}
// Find the query position of a wildcard keyword by exact pattern text
// (length + bytes must match); returns -1 when absent or sWord is null.
int SnippetsDocIndex_c::FindStarred ( const char * sWord ) const
{
	if ( !sWord )
		return -1;

	const BYTE * pBuf = m_dStarBuffer.Begin();
	auto iLen = (int) strlen ( sWord );
	ARRAY_FOREACH ( i, m_dStars )
	{
		const Keyword_t & tTok = m_dStars[i];
		if ( tTok.m_iLength==iLen && tTok.m_bStar && memcmp ( pBuf+tTok.m_iWord, sWord, iLen )==0 )
			return m_dStars[i].m_iQueryPos;
	}

	return -1;
}
// Record a document token's position into the hit list of every query term it
// matches: the exact-id term (if any) and all wildcard terms it satisfies.
// Consecutive duplicate positions are collapsed. A token matching an exact term
// still falls through to wildcards when that word id is known to be starred.
void SnippetsDocIndex_c::AddHits ( SphWordID_t iWordID, const BYTE * sWord, int iWordLen, DWORD uPosition )
{
	assert ( m_dDocHits.GetLength()==m_dTerms.GetLength()+m_dStars.GetLength() );

	// FIXME!!! replace to 6well formed full-blown infix keyword dict
	const Term_t * pQueryTerm = iWordID ? m_dTerms.BinarySearch ( bind ( &Term_t::m_iWordId ), iWordID ) : NULL;
	if ( pQueryTerm )
	{
		int iQPos = pQueryTerm->m_iQueryPos;
		// skip duplicate of the position just added
		if ( !m_dDocHits[iQPos].GetLength() || DWORD( m_dDocHits[iQPos].Last() )!=uPosition )
			m_dDocHits [iQPos].Add ( uPosition );

		// might add hit to star hit-list too
		if ( !m_dStarred.BinarySearch ( iWordID ) )
			return;
	}

	if ( sWord && iWordLen )
	{
		ARRAY_FOREACH ( i, m_dStars )
		{
			if ( MatchStar ( m_dStars[i], sWord ) )
			{
				int iQPos = m_dStars[i].m_iQueryPos;
				if ( !m_dDocHits[iQPos].GetLength() || DWORD ( m_dDocHits[iQPos].Last() )!=uPosition )
					m_dDocHits [iQPos].Add ( uPosition );
			}
		}
	}
}
// After removing duplicate terms, compact the remaining terms' query positions:
// each position is decreased by the number of removed positions below it.
// dRemovedQPos must be sorted ascending.
template<typename T>
void TermShiftDownQpos ( CSphVector<T> & dTerms, const CSphVector<int> & dRemovedQPos )
{
	if ( !dRemovedQPos.GetLength() )
		return;

	ARRAY_FOREACH ( i, dTerms )
	{
		int iCurQPos = dTerms[i].m_iQueryPos;
		int iRemoved = 0;
		// count removed positions strictly below the current one
		while ( iRemoved<dRemovedQPos.GetLength() && iCurQPos>dRemovedQPos[iRemoved] )
			iRemoved++;

		dTerms[i].m_iQueryPos = iCurQPos - iRemoved;
	}
}
// In-place deduplication of a sorted term vector: keeps the first occurrence of
// each term (which, given the sort order, has the lowest query position) and
// collects the query positions of the dropped duplicates into dRemovedQPos.
template<typename T, typename CMP>
void TermRemoveDup ( CSphVector<T> & dTerms, CSphVector<int> & dRemovedQPos, const CMP & tCmp )
{
	assert ( dTerms.GetLength()>1 );

	int iSrc = 1, iDst = 1;
	while ( iSrc<dTerms.GetLength() )
	{
		if ( tCmp.IsSameTerm ( dTerms[iDst-1], dTerms[iSrc] ) )
		{
			// duplicate: drop it but remember its query position
			dRemovedQPos.Add ( dTerms[iSrc].m_iQueryPos );
			iSrc++;
		} else
		{
			dTerms[iDst++] = dTerms[iSrc++];
		}
	}
	dTerms.Resize ( iDst );
}
/// walk the query tree collecting plain and wildcard keywords with query
/// positions, fold in SPZ magic tokens and zone words, dedupe the keyword
/// lists, and build the qpos -> weight lookup
void SnippetsDocIndex_c::ParseQuery ( const DictRefPtr_c& pDict, DWORD eExtQuerySPZ )
{
	int iQPos = 0;
	iQPos = ExtractWords ( m_tQuery.m_pRoot, pDict, iQPos );

	// sentence/paragraph boundary magic tokens, when SPZ is requested
	if ( eExtQuerySPZ & SPH_SPZ_SENTENCE )
	{
		strncpy ( (char *)m_sTmpWord, MAGIC_WORD_SENTENCE, sizeof(m_sTmpWord)-1 );
		AddWord ( pDict->GetWordID ( m_sTmpWord ), (int) strlen ( (char*)m_sTmpWord ), iQPos );
		iQPos++;
	}
	if ( eExtQuerySPZ & SPH_SPZ_PARAGRAPH )
	{
		strncpy ( (char *)m_sTmpWord, MAGIC_WORD_PARAGRAPH, sizeof(m_sTmpWord)-1 );
		AddWord ( pDict->GetWordID ( m_sTmpWord ), (int) strlen ( (char*)m_sTmpWord ), iQPos );
		iQPos++;
	}

	// should be in sync with ExtRanker_c constructor
	ARRAY_FOREACH ( i, m_tQuery.m_dZones )
	{
		snprintf ( (char *)m_sTmpWord, sizeof(m_sTmpWord)-1, "%c%s", MAGIC_CODE_ZONE, m_tQuery.m_dZones[i].cstr() );
		AddWord ( pDict->GetWordID ( m_sTmpWord ), (int) strlen ( (char*)m_sTmpWord ), iQPos );
		iQPos++;
	}

	assert ( !m_dStars.GetLength() || m_dStarBuffer.GetLength() );

	// all ok, remove dupes but keep their positions (needed to calculate LCS)
	CSphVector<int> dRemovedQPos;
	if ( m_dTerms.GetLength()>1 )
	{
		TermCmp_t tCmp;
		m_dTerms.Sort ( tCmp );
		TermRemoveDup ( m_dTerms, dRemovedQPos, tCmp );
	}
	if ( m_dStars.GetLength()>1 )
	{
		KeywordCmp_t tCmp ( (const char *)m_dStarBuffer.Begin() );
		m_dStars.Sort ( tCmp );
		TermRemoveDup ( m_dStars, dRemovedQPos, tCmp );
	}
	if ( dRemovedQPos.GetLength() )
	{
		dRemovedQPos.Sort();
		TermShiftDownQpos ( m_dTerms, dRemovedQPos );
		TermShiftDownQpos ( m_dStars, dRemovedQPos );
	}

	// per qpos weights
	m_dQposToWeight.Resize ( m_dTerms.GetLength() + m_dStars.GetLength() );
#ifndef NDEBUG
	m_dQposToWeight.Fill ( -1 );
#endif
	ARRAY_FOREACH ( i, m_dTerms )
		m_dQposToWeight[m_dTerms[i].m_iQueryPos] = m_dTerms[i].m_iWeight;
	ARRAY_FOREACH ( i, m_dStars )
		m_dQposToWeight[m_dStars[i].m_iQueryPos] = m_dStars[i].m_iWeight;
#ifndef NDEBUG
	// after compaction every qpos must have received a weight
	// (renamed from misleading 'bFilled': true means a gap was found)
	bool bHasGaps = m_dQposToWeight.any_of ( [] ( int iWeight ) { return -1==iWeight; } );
	assert ( !bHasGaps );
#endif

	// plain terms could also match as starred terms
	if ( m_dStars.GetLength() && m_dTerms.GetLength() )
	{
		CSphVector<const XQNode_t *> dChildren;
		dChildren.Add ( m_tQuery.m_pRoot );
		ARRAY_FOREACH ( i, dChildren )
		{
			const XQNode_t * pChild = dChildren[i];
			if ( !pChild )
				continue;

			for ( const auto & dChild : pChild->m_dChildren )
				dChildren.Add ( dChild );

			for ( const auto& dWord : pChild->m_dWords )
			{
				if ( HasWildcards ( dWord.m_sWord.cstr() ) )
					continue;

				const auto * sWord = (const BYTE *) dWord.m_sWord.cstr();
				int iLen = dWord.m_sWord.Length();
				for ( const auto& dStar : m_dStars )
				{
					if ( MatchStar ( dStar, sWord ) )
					{
						memcpy ( m_sTmpWord, sWord, iLen );
						// FIXED: memcpy does not copy the terminator; GetWordID expects
						// a C string, and stale bytes from a previous (longer) word in
						// m_sTmpWord could otherwise leak into the hashed keyword
						m_sTmpWord[iLen] = '\0';
						m_dStarred.Add ( pDict->GetWordID ( m_sTmpWord ) );
						break;
					}
				}
			}
		}

		m_dStarred.Uniq();
	}
}
/// weight (keyword length in codepoints) for the term at the given query position
int SnippetsDocIndex_c::GetTermWeight ( int iQueryPos ) const
{
	return m_dQposToWeight[iQueryPos];
}
/// total number of distinct query terms (plain + wildcard) after dedupe
int SnippetsDocIndex_c::GetNumTerms () const
{
	return m_dQposToWeight.GetLength();
}
/// register a plain (non-wildcard) query keyword by dictionary wordid
void SnippetsDocIndex_c::AddWord ( SphWordID_t iWordID, int iLengthCP, int iQpos )
{
	assert ( iWordID );
	Term_t & tNew = m_dTerms.Add();
	tNew.m_iQueryPos = iQpos;
	tNew.m_iWeight = iLengthCP; // weight is the keyword length in codepoints
	tNew.m_iWordId = iWordID;
}
void SnippetsDocIndex_c::AddWordStar ( const char * sWord, int iLengthCP, int iQpos )
{
auto iLen = (int) strlen ( sWord );
int iOff = m_dStarBuffer.GetLength();
m_dStarBuffer.Append ( sWord, iLen+1);
assert (m_dStarBuffer[iOff+iLen] == 0);
Keyword_t & tTok = m_dStars.Add();
tTok.m_iWord = iOff;
tTok.m_iLength = iLen;
tTok.m_bStar = true;
tTok.m_iWeight = iLengthCP;
tTok.m_iQueryPos = iQpos;
}
/// recursively collect keywords from the query tree, assigning consecutive
/// query positions; returns the next free qpos
int SnippetsDocIndex_c::ExtractWords ( XQNode_t * pNode, const DictRefPtr_c& pDict, int iQpos )
{
	if ( !pNode )
		return iQpos;

	for ( const XQKeyword_t & tWord : pNode->m_dWords )
	{
		int iLenCP = sphUTF8Len ( tWord.m_sWord.cstr() );
		if ( HasWildcards ( tWord.m_sWord.cstr() ) )
		{
			AddWordStar ( tWord.m_sWord.cstr(), iLenCP, iQpos );
			iQpos++;
			continue;
		}

		// plain keyword: map it through the dictionary; words the dict
		// rejects (e.g. stopwords) do not consume a position
		strncpy ( (char *)m_sTmpWord, tWord.m_sWord.cstr(), sizeof(m_sTmpWord)-1 );
		SphWordID_t uWordID = pDict->GetWordID ( m_sTmpWord );
		if ( uWordID )
		{
			AddWord ( uWordID, iLenCP, iQpos );
			iQpos++;
		}
	}

	for ( XQNode_t * pChild : pNode->m_dChildren )
		iQpos = ExtractWords ( pChild, pDict, iQpos );

	return iQpos;
}
/// fetch the collected hit list for a query keyword, or nullptr when the
/// keyword is unknown to this index
const CSphVector<DWORD> * SnippetsDocIndex_c::GetHitlist ( const XQKeyword_t & tWord, const DictRefPtr_c & pDict ) const
{
	int iQword = -1;
	if ( HasWildcards ( tWord.m_sWord.cstr() ) )
	{
		iQword = FindStarred ( tWord.m_sWord.cstr() );
	} else
	{
		strncpy ( (char *)m_sTmpWord, tWord.m_sWord.cstr(), sizeof(m_sTmpWord)-1 );
		SphWordID_t uWordID = pDict->GetWordID ( m_sTmpWord );
		if ( uWordID )
			iQword = FindWord ( uWordID, NULL, 0 );
	}

	if ( iQword==-1 )
		return nullptr;

	assert ( m_dDocHits.Begin() );
	return m_dDocHits.Begin() + iQword;
}
| 10,125
|
C++
|
.cpp
| 319
| 29.109718
| 130
| 0.676866
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,939
|
index_converter.cpp
|
manticoresoftware_manticoresearch/src/index_converter.cpp
|
//
// Copyright (c) 2018-2024, Manticore Software LTD (https://manticoresearch.com)
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "sphinxstd.h"
#include "fileutils.h"
#include "sphinxutils.h"
#include "sphinxint.h"
#include "icu.h"
#include "attribute.h"
#include "sphinxsearch.h"
#include "secondaryindex.h"
#include "histogram.h"
#include "sphinxpq.h"
#include "accumulator.h"
#include "indexformat.h"
#include "indexsettings.h"
#include "indexfiles.h"
#include "docidlookup.h"
#include "attrindex_builder.h"
#include "tokenizer/charset_definition_parser.h"
#include "tokenizer/tokenizer.h"
#include "dict/infix/infix_builder.h"
namespace legacy
{
// legacy (pre-v3) on-disk types: both word and doc ids are 64-bit
typedef uint64_t SphWordID_t;
typedef uint64_t SphDocID_t;

// docid occupies two DWORDs at the head of each docinfo row
#define DOCINFO_IDSIZE 2

STATIC_SIZE_ASSERT ( SphWordID_t, 8 );
STATIC_SIZE_ASSERT ( SphDocID_t, 8 );

// size of a type measured in DWORDs
#define DWSIZEOF(a) ( sizeof(a) / sizeof(DWORD) )

#define MVA_OFFSET_MASK 0x7fffffffUL // MVA offset mask
#define MVA_ARENA_FLAG 0x80000000UL // MVA global-arena flag

const DWORD SPH_SKIPLIST_BLOCK=128;

static const DWORD META_HEADER_MAGIC = 0x54525053; ///< my magic 'SPRT' header
static const DWORD META_VERSION = 17; ///< current version
static const DWORD PQ_META_HEADER_MAGIC = 0x50535451; ///< magic 'PSTQ' header
static const DWORD PQ_META_VERSION = 7; ///< current version

static bool g_bLargeDocid = false;
static CSphString g_sOutDir;

//////////////////////////////////////////////////////////////////////////

/// row entry (storage only, does not necessarily map 1:1 to attributes)
typedef DWORD CSphRowitem;
typedef const BYTE * CSphRowitemPtr;

/// widest integer type that can be stored as an attribute (ideally, fully decoupled from rowitem size!)
typedef int64_t SphAttr_t;

const int ROWITEM_BITS = 8*sizeof(CSphRowitem);
const int ROWITEM_SHIFT = 5;
STATIC_ASSERT ( ( 1 << ROWITEM_SHIFT )==ROWITEM_BITS, INVALID_ROWITEM_SHIFT );

#ifndef USE_LITTLE_ENDIAN
#error Please define endianness
#endif
/// extract the docid stored at the head of a legacy docinfo row
template < typename DOCID >
inline DOCID DOCINFO2ID_T ( const DWORD * pDocinfo );

// 32-bit docid: stored in a single DWORD
template<> inline DWORD DOCINFO2ID_T ( const DWORD * pDocinfo )
{
	return pDocinfo[0];
}

// 64-bit docid: stored as two DWORDs, order depends on host endianness
template<> inline uint64_t DOCINFO2ID_T ( const DWORD * pDocinfo )
{
#if USE_LITTLE_ENDIAN
	return uint64_t(pDocinfo[0]) + (uint64_t(pDocinfo[1])<<32);
#else
	return uint64_t(pDocinfo[1]) + (uint64_t(pDocinfo[0])<<32);
#endif
}

static SphDocID_t DOCINFO2ID ( const DWORD * pDocinfo )
{
	return DOCINFO2ID_T<SphDocID_t> ( pDocinfo );
}
// skip past the docid to the attribute payload of a legacy docinfo row
template < typename DOCID > inline DWORD * DOCINFO2ATTRS_T ( DWORD * pDocinfo ) { return pDocinfo + DWSIZEOF(DOCID); }
template < typename DOCID > inline const DWORD * DOCINFO2ATTRS_T ( const DWORD * pDocinfo ) { return pDocinfo + DWSIZEOF(DOCID); }
inline DWORD * DOCINFO2ATTRS ( DWORD * pDocinfo ) { return DOCINFO2ATTRS_T<SphDocID_t>(pDocinfo); }
inline const DWORD * DOCINFO2ATTRS ( const DWORD * pDocinfo ) { return DOCINFO2ATTRS_T<SphDocID_t>(pDocinfo); }
/// legacy docinfo storage modes (only EXTERN is convertible, see LoadHeader)
enum ESphDocinfo
{
	SPH_DOCINFO_NONE = 0, ///< no docinfo available
	SPH_DOCINFO_INLINE = 1, ///< inline docinfo into index (specifically, into doclists)
	SPH_DOCINFO_EXTERN = 2 ///< store docinfo separately
};

/// kind of index being converted
enum IndexType_e
{
	INDEX_UNKNOWN,
	INDEX_PLAIN,
	INDEX_RT,
	INDEX_PQ
};

// display names; must stay in sync with IndexType_e order above
static const char * g_sIndexType[] = { "none", "plain", "rt", "percolate" };
/// unpack string attr from row storage (22 bits length max)
/// returns unpacked length; stores pointer to string data if required
static int sphUnpackStr ( const BYTE * pRow, const BYTE ** ppStr )
{
int v = *pRow++;
if ( v & 0x80 )
{
if ( v & 0x40 )
{
v = ( int ( v & 0x3f )<<16 ) + ( int ( *pRow++ )<<8 );
v += ( *pRow++ ); // MUST be separate statement; cf. sequence point
} else
{
v = ( int ( v & 0x3f )<<8 ) + ( *pRow++ );
}
}
if ( ppStr )
*ppStr = pRow;
return v;
}
/// compose "<path>.<ext>" for a legacy index component file
static CSphString GetIndexFileName ( const CSphString & sPath, const char * sExt )
{
	CSphString sFullName;
	sFullName.SetSprintf ( "%s.%s", sPath.cstr(), sExt );
	return sFullName;
}
/// legacy index settings as deserialized from a pre-v3 .sph header
/// (see LoadIndexSettings for the version-gated read order)
struct IndexSettings_t : public CSphSourceSettings
{
	ESphDocinfo m_eDocinfo = SPH_DOCINFO_EXTERN;
	ESphHitFormat m_eHitFormat = SPH_HIT_FORMAT_PLAIN;
	bool m_bHtmlStrip = false;
	CSphString m_sHtmlIndexAttrs;
	CSphString m_sHtmlRemoveElements;
	CSphString m_sZones;
	ESphHitless m_eHitless = SPH_HITLESS_NONE;
	int m_iEmbeddedLimit = 0;

	int64_t m_tBlobUpdateSpace = 0;
	int m_iSkiplistBlockSize = 0;

	ESphBigram m_eBigramIndex = SPH_BIGRAM_NONE;
	CSphString m_sBigramWords;
	StrVec_t m_dBigramWords;

	DWORD m_uAotFilterMask = 0; // AOT lemmatizer mask, derived from morphology setting
	Preprocessor_e m_ePreprocessor = Preprocessor_e::ICU;

	CSphString m_sIndexTokenFilter;
};
/// minimal legacy wordlist (.spi) reader; only the dictionary meta is needed
/// for conversion, so the wildcard expansion hooks are stubbed out
struct Wordlist_t : public ISphWordlist
{
public:
	SphOffset_t m_iWordsEnd = 0; ///< end of wordlist
	int m_iDictCheckpoints = 0; ///< how many dict checkpoints (keyword blocks) are there
	SphOffset_t m_iDictCheckpointsOffset = 0; ///< dict checkpoints file position
	int m_iInfixCodepointBytes = 0; ///< max bytes per infix codepoint (0 means no infixes)
	int64_t m_iInfixBlocksOffset = 0; ///< infix blocks file position (stored as unsigned 32bit int as keywords dictionary is pretty small)
	int m_iInfixBlocksWordsSize = 0; ///< infix checkpoints size

	Wordlist_t () {}
	virtual ~Wordlist_t () override {}

	bool Preread ( const char * sName, DWORD uVersion, bool bWordDict, CSphString & sError );

	// not needed for conversion; intentionally no-ops
	void GetPrefixedWords ( const char * sSubstring, int iSubLen, const char * sWildcard, Args_t & tArgs ) const override {}
	void GetInfixedWords ( const char * sSubstring, int iSubLen, const char * sWildcard, Args_t & tArgs ) const override {}
	void ScanRegexWords ( const VecTraits_T<RegexTerm_t> & dTerms, const ISphWordlist::Args_t & tArgs, const VecExpandConv_t & dConverters ) const override {}

private:
	bool m_bWordDict = false;
};
/// everything loaded from one legacy index: header fields, memory-mapped
/// data files, tokenizer/dict settings, plus RT- and PQ-specific extras
struct Index_t
{
	DWORD m_uVersion = 0; ///< on-disk format version of the source index
	int64_t m_iMinMaxIndex = 0; ///< offset (in DWORDs) of the min/max block inside .spa
	int64_t m_iDocinfo = 0; ///< row count
	bool m_bArenaProhibit = false; ///< MVA arena too large to apply .mvp updates
	DWORD m_iTotalDocuments = 0;
	int64_t m_iTotalBytes = 0;
	SphDocID_t m_uMinDocid; // NOTE: left uninitialized by design? set from header in LoadHeader
	CSphVector<CSphColumnInfo> m_dSchemaFields;
	CSphVector<CSphColumnInfo> m_dSchemaAttrs;
	IndexSettings_t m_tSettings;
	CSphAutoreader m_tDoclistFile; // .spd
	CSphAutoreader m_tHitlistFile; // .spp
	CSphMappedBuffer<DWORD> m_tAttr; // .spa
	CSphMappedBuffer<DWORD> m_tMva; // .spm
	CSphMappedBuffer<DWORD> m_tMvaArena; // .mvp (persisted MVA updates)
	CSphMappedBuffer<BYTE> m_tString; // .sps
	CSphMappedBuffer<SphDocID_t> m_tKillList; // .spk
	CSphMappedBuffer<BYTE> m_tSkiplists; // .spe
	Wordlist_t m_tWordlist; // .spi meta
	CSphFixedVector<int64_t> m_dFieldLens { SPH_MAX_FIELDS };
	CSphDictSettings m_tDictSettings;
	DictRefPtr_c m_pDict;
	CSphTokenizerSettings m_tTokSettings;
	TokenizerRefPtr_c m_pTokenizer;
	CSphFieldFilterSettings m_tFieldFilterSettings;
	CSphEmbeddedFiles m_tEmbeddedTok;
	CSphEmbeddedFiles m_tEmbeddedDict;

	CSphString m_sName;
	CSphString m_sPath;
	CSphString m_sPathOut;
	bool m_bStripPath = false;
	KillListTargets_c m_tKlistTargets;

	// RT specific
	int64_t m_iTID = 0;
	int m_iSegmentSeq = 0;
	int m_iWordsCheckpoint = 0;
	int m_iMaxCodepointLength = 0;
	CSphFixedVector<int> m_dRtChunkNames {0};
	CSphSchema m_tSchema;

	// PQ specific
	CSphFixedVector<StoredQueryDesc_t> m_dStored { 0 };

	bool IsSeparateOutDir () const { return m_sPath!=m_sPathOut; }

	/// output file name: plain name in a separate dir, or "<path>.new<ext>"
	/// when converting in place (to avoid clobbering the source)
	CSphString GetFilename ( ESphExt eExt ) const
	{
		CSphString sName;
		if ( IsSeparateOutDir() )
			sName.SetSprintf ( "%s%s", m_sPathOut.cstr(), sphGetExt(eExt) );
		else
			sName.SetSprintf ( "%s.new%s", m_sPathOut.cstr(), sphGetExt(eExt) );
		return sName;
	}
};
/// map the .mvp file (persisted MVA updates) when present;
/// absence of the file is not an error (means no saved attribute updates)
static bool LoadPersistentMVA ( Index_t & tIndex, const CSphString & sPath, CSphString & sError )
{
	CSphString sMvpName = GetIndexFileName ( sPath, "mvp" );

	// no mvp means no saved attributes
	if ( !sphIsReadable ( sMvpName.cstr() ) )
		return true;

	if ( tIndex.m_bArenaProhibit )
	{
		sError.SetSprintf ( "MVA update disabled (already so many MVA " INT64_FMT ", should be less %d)", tIndex.m_tMva.GetLength64(), INT_MAX );
		return false;
	}

	return tIndex.m_tMvaArena.Setup ( sMvpName.cstr(), sError, false );
}
/// deserialize legacy index settings; field order and version gates must
/// exactly match the legacy writer, do not reorder these reads
static void LoadIndexSettings ( IndexSettings_t & tSettings, CSphReader & tReader, DWORD uVersion )
{
	tSettings.SetMinPrefixLen ( tReader.GetDword() );
	tSettings.m_iMinInfixLen = tReader.GetDword ();
	if ( uVersion>=38 )
		tSettings.m_iMaxSubstringLen = tReader.GetDword();

	tSettings.m_bHtmlStrip = !!tReader.GetByte ();
	tSettings.m_sHtmlIndexAttrs = tReader.GetString ();
	tSettings.m_sHtmlRemoveElements = tReader.GetString ();

	tSettings.m_bIndexExactWords = !!tReader.GetByte ();
	tSettings.m_eHitless = (ESphHitless)tReader.GetDword();
	tSettings.m_eHitFormat = (ESphHitFormat)tReader.GetDword();
	tSettings.m_bIndexSP = !!tReader.GetByte();
	tSettings.m_sZones = tReader.GetString();
	tSettings.m_iBoundaryStep = (int)tReader.GetDword();
	tSettings.m_iStopwordStep = (int)tReader.GetDword();
	tSettings.m_iOvershortStep = (int)tReader.GetDword();
	tSettings.m_iEmbeddedLimit = (int)tReader.GetDword();

	if ( uVersion>=32 )
	{
		tSettings.m_eBigramIndex = (ESphBigram)tReader.GetByte();
		tSettings.m_sBigramWords = tReader.GetString();
	}

	if ( uVersion>=35 )
		tSettings.m_bIndexFieldLens = ( tReader.GetByte()!=0 );

	if ( uVersion>=39 )
	{
		tSettings.m_ePreprocessor = tReader.GetByte()==1 ? Preprocessor_e::ICU : Preprocessor_e::NONE;
		tReader.GetString(); // was: RLP context
	}

	if ( uVersion>=41 )
		tSettings.m_sIndexTokenFilter = tReader.GetString();

	if ( uVersion>55 )
		tSettings.m_tBlobUpdateSpace = tReader.GetOffset();

	// older indexes had a fixed skiplist block size
	if ( uVersion<56 )
		tSettings.m_iSkiplistBlockSize = 128;
	else
		tSettings.m_iSkiplistBlockSize = (int)tReader.GetDword();
}
/// deserialize legacy tokenizer settings; rejects SBCS tokenizers which are
/// no longer supported. Read order must match the legacy writer.
static bool LoadTokenizerSettings ( CSphReader & tReader, CSphTokenizerSettings & tSettings, CSphEmbeddedFiles & tEmbeddedFiles, DWORD uVersion, CSphString & sWarning )
{
	tSettings.m_iType = tReader.GetByte ();
	if ( tSettings.m_iType!=TOKENIZER_UTF8 && tSettings.m_iType!=TOKENIZER_NGRAM )
	{
		sWarning = "can't load an old table with SBCS tokenizer";
		return false;
	}

	tSettings.m_sCaseFolding = tReader.GetString ();
	tSettings.m_iMinWordLen = tReader.GetDword ();

	tEmbeddedFiles.m_bEmbeddedSynonyms = false;
	tEmbeddedFiles.m_bEmbeddedSynonyms = !!tReader.GetByte();
	if ( tEmbeddedFiles.m_bEmbeddedSynonyms )
	{
		// synonyms stored inline in the header
		int nSynonyms = (int)tReader.GetDword();
		tEmbeddedFiles.m_dSynonyms.Resize ( nSynonyms );
		ARRAY_FOREACH ( i, tEmbeddedFiles.m_dSynonyms )
			tEmbeddedFiles.m_dSynonyms[i] = tReader.GetString();
	}

	tSettings.m_sSynonymsFile = tReader.GetString ();
	// warn about a missing external file only when synonyms are NOT embedded
	tEmbeddedFiles.m_tSynonymFile.Read ( tReader, tSettings.m_sSynonymsFile.cstr (), false, tEmbeddedFiles.m_bEmbeddedSynonyms ? NULL : &sWarning );
	tSettings.m_sBoundary = tReader.GetString ();
	tSettings.m_sIgnoreChars = tReader.GetString ();
	tSettings.m_iNgramLen = tReader.GetDword ();
	tSettings.m_sNgramChars = tReader.GetString ();
	tSettings.m_sBlendChars = tReader.GetString ();
	tSettings.m_sBlendMode = tReader.GetString();

	return true;
}
/// deserialize legacy dictionary settings (morphology, stopwords, wordforms);
/// read order and version gates must match the legacy writer exactly
static void LoadDictionarySettings ( CSphReader & tReader, CSphDictSettings & tSettings, CSphEmbeddedFiles & tEmbeddedFiles, DWORD uVersion, CSphString & sWarning )
{
	tSettings.m_sMorphology = tReader.GetString ();
	if ( uVersion>=43 )
		tSettings.m_sMorphFields = tReader.GetString();

	tEmbeddedFiles.m_bEmbeddedStopwords = !!tReader.GetByte();
	if ( tEmbeddedFiles.m_bEmbeddedStopwords )
	{
		// stopword ids stored inline in the header
		int nStopwords = (int)tReader.GetDword();
		tEmbeddedFiles.m_dStopwords.Resize ( nStopwords );
		ARRAY_FOREACH ( i, tEmbeddedFiles.m_dStopwords )
			tEmbeddedFiles.m_dStopwords[i] = (SphWordID_t)tReader.UnzipOffset();
	}

	tSettings.m_sStopwords = tReader.GetString ();
	int nFiles = tReader.GetDword ();

	CSphString sFile;
	tEmbeddedFiles.m_dStopwordFiles.Resize ( nFiles );
	for ( int i = 0; i < nFiles; i++ )
	{
		sFile = tReader.GetString ();
		// FIXED: missing-file warning must be suppressed based on whether
		// STOPWORDS are embedded, not synonyms (was a copy-paste from the
		// tokenizer loader; the flag only gates warning emission)
		tEmbeddedFiles.m_dStopwordFiles[i].Read ( tReader, sFile.cstr (), false, tEmbeddedFiles.m_bEmbeddedStopwords ? NULL : &sWarning );
	}

	tEmbeddedFiles.m_bEmbeddedWordforms = !!tReader.GetByte();
	if ( tEmbeddedFiles.m_bEmbeddedWordforms )
	{
		int nWordforms = (int)tReader.GetDword();
		tEmbeddedFiles.m_dWordforms.Resize ( nWordforms );
		ARRAY_FOREACH ( i, tEmbeddedFiles.m_dWordforms )
			tEmbeddedFiles.m_dWordforms[i] = tReader.GetString();
	}

	tSettings.m_dWordforms.Resize ( tReader.GetDword() );
	tEmbeddedFiles.m_dWordformFiles.Resize ( tSettings.m_dWordforms.GetLength() );
	ARRAY_FOREACH ( i, tSettings.m_dWordforms )
	{
		tSettings.m_dWordforms[i] = tReader.GetString();
		tEmbeddedFiles.m_dWordformFiles[i].Read ( tReader, tSettings.m_dWordforms[i].cstr(), false, tEmbeddedFiles.m_bEmbeddedWordforms ? NULL : &sWarning );
	}

	tSettings.m_iMinStemmingLen = tReader.GetDword ();
	// default to crc for old indexes, then read the stored flag
	tSettings.m_bWordDict = ( tReader.GetByte()!=0 );

	if ( uVersion>=36 )
		tSettings.m_bStopwordsUnstemmed = ( tReader.GetByte()!=0 );

	if ( uVersion>=37 )
		tSettings.m_sMorphFingerprint = tReader.GetString();
}
/// deserialize legacy field filter (regexp) settings;
/// note: when the count is zero the writer stored nothing else,
/// so the early return must NOT consume the trailing flag byte
static void LoadFieldFilterSettings ( CSphReader & tReader, CSphFieldFilterSettings & tFieldFilterSettings )
{
	int iCount = tReader.GetDword();
	if ( !iCount )
		return;

	tFieldFilterSettings.m_dRegexps.Resize ( iCount );
	for ( auto & sRegexp : tFieldFilterSettings.m_dRegexps )
		sRegexp = tReader.GetString();

	tReader.GetByte(); // deprecated utf-8 flag
}
/// deserialize a single legacy schema column; read order is fixed by the format
static void ReadSchemaColumn ( CSphReader & rdInfo, CSphColumnInfo & tCol )
{
	tCol.m_sName = rdInfo.GetString ();
	if ( tCol.m_sName.IsEmpty () )
		tCol.m_sName = "@emptyname"; // placeholder for corrupted/blank names
	tCol.m_sName.ToLower ();
	tCol.m_eAttrType = (ESphAttr) rdInfo.GetDword ();

	rdInfo.GetDword (); // ignore rowitem
	tCol.m_tLocator.m_iBitOffset = rdInfo.GetDword ();
	tCol.m_tLocator.m_iBitCount = rdInfo.GetDword ();
	tCol.m_bPayload = ( rdInfo.GetByte()!=0 );
}
/// deserialize a legacy schema: field descriptors first, then attributes
static void ReadSchema ( CSphReader & rdInfo, CSphVector<CSphColumnInfo> & dFields, CSphVector<CSphColumnInfo> & dAttrs )
{
	for ( int iLeft = (int)rdInfo.GetDword(); iLeft>0; iLeft-- )
	{
		CSphColumnInfo & tField = dFields.Add();
		ReadSchemaColumn ( rdInfo, tField );
	}

	for ( int iLeft = (int)rdInfo.GetDword(); iLeft>0; iLeft-- )
	{
		CSphColumnInfo & tAttr = dAttrs.Add();
		ReadSchemaColumn ( rdInfo, tAttr );
	}
}
static int GetRowSize ( const CSphVector<CSphColumnInfo> & dAttrs )
{
int iMaxBitSize = 0;
for ( const auto & i : dAttrs )
iMaxBitSize = Max ( iMaxBitSize, i.m_tLocator.m_iBitOffset + i.m_tLocator.m_iBitCount );
return (iMaxBitSize+ROWITEM_BITS-1) / ROWITEM_BITS;
}
/// build the runtime tokenizer and dictionary from the loaded legacy settings,
/// wiring up multiforms and (optionally) the AOT lemmatizer filter
static bool SetupWordProcessors ( Index_t & tIndex, CSphString & sError )
{
	StrVec_t dWarnings;
	TokenizerRefPtr_c pTokenizer = Tokenizer::Create ( tIndex.m_tTokSettings, &tIndex.m_tEmbeddedTok, nullptr, dWarnings, sError );
	if ( !pTokenizer )
		return false;

	// keyword dict vs crc dict, depending on how the source index was built
	DictRefPtr_c pDict { tIndex.m_tDictSettings.m_bWordDict
		? sphCreateDictionaryKeywords ( tIndex.m_tDictSettings, &tIndex.m_tEmbeddedDict, pTokenizer, tIndex.m_sName.cstr(), false, tIndex.m_tSettings.m_iSkiplistBlockSize, nullptr, sError )
		: sphCreateDictionaryCRC ( tIndex.m_tDictSettings, &tIndex.m_tEmbeddedDict, pTokenizer, tIndex.m_sName.cstr(), false, tIndex.m_tSettings.m_iSkiplistBlockSize, nullptr, sError ) };
	if ( !pDict )
		return false;

	tIndex.m_pDict = pDict;
	Tokenizer::AddToMultiformFilterTo ( pTokenizer, tIndex.m_pDict->GetMultiWordforms () );

	// initialize AOT if needed
	tIndex.m_tSettings.m_uAotFilterMask = sphParseMorphAot ( tIndex.m_tDictSettings.m_sMorphology.cstr() );

	// aot filter
	if ( tIndex.m_tSettings.m_uAotFilterMask )
		sphAotTransformFilter ( pTokenizer, tIndex.m_pDict, tIndex.m_tSettings.m_bIndexExactWords, tIndex.m_tSettings.m_uAotFilterMask );

	tIndex.m_pTokenizer = pTokenizer;

	return true;
}
/// parse a legacy .sph header into tIndex; rejects formats that cannot be
/// converted (32-bit docids, inline docinfo, pre-skiplist, already-v3).
/// The read sequence mirrors the legacy writer and must not be reordered.
static bool LoadHeader ( const char * sHeaderName, Index_t & tIndex, CSphString & sError )
{
	CSphAutoreader rdInfo;
	if ( !rdInfo.Open ( sHeaderName, sError ) )
		return false;

	// magic header
	const char * sFmt = CheckFmtMagic ( rdInfo.GetDword () );
	if ( sFmt )
	{
		sError.SetSprintf ( sFmt, sHeaderName );
		return false;
	}

	// version
	tIndex.m_uVersion = rdInfo.GetDword();
	if ( tIndex.m_uVersion<=1 || tIndex.m_uVersion>INDEX_FORMAT_VERSION || tIndex.m_uVersion<34 )
	{
		sError.SetSprintf ( "%s is v.%d, binary is v.%d", tIndex.m_sName.cstr(), tIndex.m_uVersion, INDEX_FORMAT_VERSION );
		return false;
	}

	// v50+ is already the new format; nothing to convert
	if ( tIndex.m_uVersion>=50 )
	{
		sError.SetSprintf ( "already a v3 table; nothing to do" );
		return false;
	}

	// bits
	bool bUse64 = !!rdInfo.GetDword();
	if ( !bUse64 )
	{
		sError = "tables with 32-bit docids are no longer supported";
		return false;
	}

	// skiplists
	if ( tIndex.m_uVersion<31 )
	{
		sError = "tables without skiplist unsupported";
		return false;
	}

	// docinfo
	ESphDocinfo eDocinfo = (ESphDocinfo)rdInfo.GetDword();
	if ( eDocinfo!=SPH_DOCINFO_EXTERN )
	{
		sError.SetSprintf ( "table without docinfo extern unsupported, docinfo is %d", (int)eDocinfo );
		return false;
	}

	ReadSchema ( rdInfo, tIndex.m_dSchemaFields, tIndex.m_dSchemaAttrs );

	tIndex.m_uMinDocid = (SphDocID_t)rdInfo.GetOffset ();

	// dictionary/wordlist meta
	tIndex.m_tWordlist.m_iDictCheckpointsOffset = rdInfo.GetOffset();
	tIndex.m_tWordlist.m_iDictCheckpoints = rdInfo.GetDword();
	tIndex.m_tWordlist.m_iInfixCodepointBytes = rdInfo.GetByte();
	tIndex.m_tWordlist.m_iInfixBlocksOffset = rdInfo.GetDword();
	tIndex.m_tWordlist.m_iInfixBlocksWordsSize = rdInfo.GetDword();

	// index stats
	tIndex.m_iTotalDocuments = rdInfo.GetDword (); // m_iTotalDocuments
	tIndex.m_iTotalBytes = rdInfo.GetOffset (); // m_iTotalBytes
	if ( tIndex.m_uVersion>=40 )
		rdInfo.GetDword(); // m_iTotalDups

	legacy::LoadIndexSettings ( tIndex.m_tSettings, rdInfo, tIndex.m_uVersion );

	// tokenizer stuff
	if ( !LoadTokenizerSettings ( rdInfo, tIndex.m_tTokSettings, tIndex.m_tEmbeddedTok, tIndex.m_uVersion, sError ) )
		return false;

	if ( tIndex.m_bStripPath )
		StripPath ( tIndex.m_tTokSettings.m_sSynonymsFile );

	// dictionary stuff
	legacy::LoadDictionarySettings ( rdInfo, tIndex.m_tDictSettings, tIndex.m_tEmbeddedDict, tIndex.m_uVersion, sError );
	// dictionary loader reports missing external files via sError; downgrade to a warning
	if ( !sError.IsEmpty() )
	{
		sphWarning ( "%s", sError.cstr() );
		sError = "";
	}

	if ( tIndex.m_bStripPath )
	{
		StripPath ( tIndex.m_tDictSettings.m_sStopwords );
		ARRAY_FOREACH ( i, tIndex.m_tDictSettings.m_dWordforms )
			StripPath ( tIndex.m_tDictSettings.m_dWordforms[i] );
	}

	if ( !SetupWordProcessors ( tIndex, sError ) )
		return false;

	rdInfo.GetDword (); // skipped legacy field
	tIndex.m_iMinMaxIndex = rdInfo.GetOffset ();

	legacy::LoadFieldFilterSettings ( rdInfo, tIndex.m_tFieldFilterSettings );

	// per-field lengths, when the index stored them
	if ( tIndex.m_uVersion>=35 && tIndex.m_tSettings.m_bIndexFieldLens )
		for ( int i=0; i < tIndex.m_dSchemaFields.GetLength(); i++ )
			tIndex.m_dFieldLens[i] = rdInfo.GetOffset();

	if ( rdInfo.GetErrorFlag() )
	{
		sError.SetSprintf ( "%s: failed to parse header (unexpected eof)", sHeaderName );
		return false;
	}

	// sanity: the whole header must have been consumed
	if ( rdInfo.GetPos()!=rdInfo.GetFilesize() )
	{
		sError.SetSprintf ( "%s: unexpected tail left; position " INT64_FMT ", file size " INT64_FMT , sHeaderName, (int64_t)rdInfo.GetPos(), (int64_t)rdInfo.GetFilesize() );
		return false;
	}

	return true;
}
/// validate the .spi dictionary meta and compute m_iWordsEnd (where keyword
/// data stops); checkpoint/infix payloads themselves are mapped lazily
bool Wordlist_t::Preread ( const char * sName, DWORD uVersion, bool bWordDict, CSphString & sError )
{
	assert ( ( uVersion>=21 && bWordDict ) || !bWordDict );
	assert ( m_iDictCheckpointsOffset>0 );

	m_bWordDict = bWordDict;
	m_iWordsEnd = m_iDictCheckpointsOffset; // set wordlist end

	////////////////////////////
	// preload word checkpoints
	////////////////////////////

	////////////////////////////
	// regular path that loads checkpoints data

	CSphAutoreader tReader;
	if ( !tReader.Open ( sName, sError ) )
		return false;

	int64_t iFileSize = tReader.GetFilesize();

	// dictionary meta must fit into 32 bits
	if ( iFileSize-m_iDictCheckpointsOffset>=UINT_MAX )
	{
		sError.SetSprintf ( "dictionary meta overflow: meta size=" INT64_FMT ", total size=" INT64_FMT ", meta offset=" INT64_FMT,
			iFileSize-m_iDictCheckpointsOffset, iFileSize, (int64_t)m_iDictCheckpointsOffset );
		return false;
	}

	////////////////////////
	// preload infix blocks
	////////////////////////

	if ( m_iInfixCodepointBytes && m_iInfixBlocksOffset )
	{
		SphOffset_t uInfixOffset = 0;
		tReader.SeekTo ( m_iInfixBlocksOffset, (int)(iFileSize-m_iInfixBlocksOffset) );
		int iInfixCount = tReader.UnzipInt();
		if ( iInfixCount )
		{
			// skip the first infix block body to find where entries start
			int iBytes = tReader.UnzipInt();
			tReader.SkipBytes ( iBytes );
			uInfixOffset = tReader.UnzipInt();
		}

		// FIXME!!! store and load that explicitly
		// adjust words end back past the infix entries tag
		if ( iInfixCount )
			m_iWordsEnd = uInfixOffset - g_sTagInfixEntries.second;
		else
			m_iWordsEnd -= g_sTagInfixEntries.second;
	}

	if ( tReader.GetErrorFlag() )
	{
		sError = tReader.GetErrorMessage();
		return false;
	}

	tReader.Close();

	return true;
}
/// open and validate all component files of a plain legacy index chunk:
/// header, doclist/hitlist/skiplist, wordlist meta, attributes, MVA, strings,
/// kill-list, and any persisted MVA updates
static bool LoadPlainIndexChunk ( Index_t & tIndex, CSphString & sError )
{
	const CSphString & sPath = tIndex.m_sPath;

	// preload schema
	if ( !LoadHeader ( GetIndexFileName ( sPath, "sph" ).cstr(), tIndex, sError ) )
		return false;

	if ( tIndex.m_tSettings.m_eDocinfo!=SPH_DOCINFO_EXTERN )
	{
		sError.SetSprintf ( "only docinfo extern supported, docinfo=%d", (int)tIndex.m_tSettings.m_eDocinfo );
		return false;
	}

	// verify that data files are readable
	if ( !sphIsReadable ( GetIndexFileName ( sPath, "spd" ).cstr(), &sError ) )
		return false;

	if ( !sphIsReadable ( GetIndexFileName ( sPath, "spp" ).cstr(), &sError ) )
		return false;

	if ( !sphIsReadable ( GetIndexFileName ( sPath, "spe" ).cstr(), &sError ) )
		return false;

	// preopen
	if ( !tIndex.m_tDoclistFile.Open ( GetIndexFileName ( sPath, "spd" ), sError ) )
		return false;

	if ( !tIndex.m_tHitlistFile.Open ( GetIndexFileName ( sPath, "spp" ), sError ) )
		return false;

	/////////////////////
	// prealloc wordlist
	/////////////////////

	if ( !sphIsReadable ( GetIndexFileName ( sPath, "spi" ).cstr(), &sError ) )
		return false;

	// only checkpoint and wordlist infixes are actually read here; dictionary itself is just mapped
	if ( !tIndex.m_tWordlist.Preread ( GetIndexFileName ( sPath, "spi" ).cstr(), tIndex.m_uVersion, tIndex.m_tDictSettings.m_bWordDict, sError ) )
		return false;

	{
		// probe .spa readability before mapping it
		CSphAutofile tDocinfo ( GetIndexFileName ( sPath, "spa" ), SPH_O_READ, sError );
		if ( tDocinfo.GetFD()<0 )
			return false;
	}

	/////////////////////
	// prealloc docinfos
	/////////////////////

	/////////////
	// attr data
	/////////////

	int iStride = DOCINFO_IDSIZE + GetRowSize ( tIndex.m_dSchemaAttrs );

	if ( !tIndex.m_tAttr.Setup ( GetIndexFileName ( sPath, "spa" ).cstr(), sError, false ) )
		return false;

	int64_t iDocinfoSize = tIndex.m_tAttr.GetLengthBytes();
	if ( iDocinfoSize<0 )
		return false;
	iDocinfoSize = iDocinfoSize / sizeof(DWORD);
	// min/max index (when present) marks the end of the actual rows
	int64_t iRealDocinfoSize = tIndex.m_iMinMaxIndex ? tIndex.m_iMinMaxIndex : iDocinfoSize;
	tIndex.m_iDocinfo = iRealDocinfoSize / iStride;

	if ( iDocinfoSize < iRealDocinfoSize )
	{
		sError.SetSprintf ( "precomputed chunk size check mismatch (size=" INT64_FMT ", real=" INT64_FMT ", min-max=" INT64_FMT ", count=" INT64_FMT ")",
			iDocinfoSize, iRealDocinfoSize, tIndex.m_iMinMaxIndex, tIndex.m_iDocinfo );
		return false;
	}

	////////////
	// MVA data
	////////////

	if ( !tIndex.m_tMva.Setup ( GetIndexFileName ( sPath, "spm" ).cstr(), sError, false ) )
		return false;

	if ( tIndex.m_tMva.GetLength64()>INT_MAX )
	{
		// offsets beyond INT_MAX can not address the update arena
		tIndex.m_bArenaProhibit = true;
		sphWarning ( "MVA update disabled (loaded MVA " INT64_FMT ", should be less %d)", tIndex.m_tMva.GetLength64(), INT_MAX );
	}

	///////////////
	// string data
	///////////////

	if ( !tIndex.m_tString.Setup ( GetIndexFileName ( sPath, "sps" ).cstr(), sError, false ) )
		return false;

	// prealloc killlist
	if ( !tIndex.m_tKillList.Setup ( GetIndexFileName ( sPath, "spk" ).cstr(), sError, false ) )
		return false;

	// prealloc skiplist
	if ( !tIndex.m_tSkiplists.Setup ( GetIndexFileName ( sPath, "spe" ).cstr(), sError, false ) )
		return false;

	// almost done
	bool bPersistMVA = sphIsReadable ( GetIndexFileName ( sPath, "mvp" ).cstr() );
	if ( bPersistMVA )
	{
		if ( tIndex.m_bArenaProhibit )
		{
			sError.SetSprintf ( "MVA update disabled (already so many MVA " INT64_FMT ", should be less %d)", tIndex.m_tMva.GetLength64(), INT_MAX );
			return false;
		}

		if ( !LoadPersistentMVA ( tIndex, sPath, sError ) )
			return false;
	}

	return true;
}
/// streams legacy docinfo rows into the new row format: inline (plain int)
/// attributes are relocated into a fresh row, blob attributes (strings, JSON,
/// MVA) are fed to the supplied blob row builder
struct AttrConverter_t
{
	AttrConverter_t ( const Index_t & tSrc, const CSphSchema & tDst, BlobRowBuilder_i * pBlob );

	/// produce the next converted row, or nullptr after the last one
	CSphRowitem * NextRow();
	/// docid of the row last returned by NextRow()
	SphDocID_t GetRowDocid() { return m_tCurDocID; }

	CSphFixedVector<CSphRowitem> m_dDstRow {0}; ///< reusable destination row buffer
	CSphVector<int64_t> m_dMVA; ///< scratch buffer for widening 32-bit MVAs
	const int m_iSrcStride = 0; ///< source row width in DWORDs (docid included)
	const int64_t m_iRows = 0;
	int64_t m_iCurRow = -1;
	SphDocID_t m_tCurDocID = 0;
	const Index_t & m_tIndex;
	BlobRowBuilder_i * m_pBlob = nullptr;
	CSphString m_sError;

	CSphAttrLocator m_tLocID; ///< docid locator in the destination row
	CSphFixedVector<CSphAttrLocator> m_dDstLoc { 0 }; ///< per-source-attr destination locators (non-blob only)
	const DWORD * m_pMvaUpdates = nullptr; ///< cursor into persisted .mvp updates, if any
};
/// precompute destination locators for every inline source attribute and
/// position the persisted-MVA cursor (blob attrs are handled via m_pBlob)
AttrConverter_t::AttrConverter_t ( const Index_t & tSrc, const CSphSchema & tDst, BlobRowBuilder_i * pBlob )
	: m_iSrcStride ( DOCINFO_IDSIZE + GetRowSize ( tSrc.m_dSchemaAttrs ) )
	, m_iRows ( tSrc.m_iDocinfo )
	, m_tIndex ( tSrc )
	, m_pBlob ( pBlob )
{
	m_dDstRow.Reset ( tDst.GetRowSize() );

	// docid becomes a regular (static) attribute in the new format
	const CSphColumnInfo * pColumnID = tDst.GetAttr ( sphGetDocidName() );
	assert ( pColumnID );
	assert ( !pColumnID->m_tLocator.m_bDynamic );
	m_tLocID = pColumnID->m_tLocator;

	const CSphVector<CSphColumnInfo> & dAttrs = m_tIndex.m_dSchemaAttrs;
	m_dDstLoc.Reset ( dAttrs.GetLength() );
	ARRAY_FOREACH ( i, dAttrs )
	{
		const CSphColumnInfo & tColumnSrc = dAttrs[i];
		// blob attributes do not get a row locator; they go through the blob builder
		if ( tColumnSrc.m_eAttrType==SPH_ATTR_STRING || tColumnSrc.m_eAttrType==SPH_ATTR_JSON ||
			tColumnSrc.m_eAttrType==SPH_ATTR_UINT32SET || tColumnSrc.m_eAttrType==SPH_ATTR_INT64SET )
			continue;

		const CSphColumnInfo * pColumnDst = tDst.GetAttr ( tColumnSrc.m_sName.cstr() );
		assert ( pColumnDst );
		m_dDstLoc[i]= pColumnDst->m_tLocator;
	}

	// persist MVA
	if ( !tSrc.m_tMvaArena.IsEmpty() )
	{
		// .mvp layout: docs count, then docids, then the update values
		const DWORD * pMvaArena = tSrc.m_tMvaArena.GetReadPtr();
		DWORD uDocs = *pMvaArena;
		if ( uDocs )
			m_pMvaUpdates = (pMvaArena+1) + uDocs * sizeof(SphDocID_t)/sizeof(DWORD);
	}
}
/// convert the next source docinfo row; returns the filled destination row,
/// or nullptr when all rows have been consumed. Blob attrs are emitted to
/// m_pBlob in schema order as a side effect.
CSphRowitem * AttrConverter_t::NextRow()
{
	if ( m_iCurRow+1<m_iRows )
		m_iCurRow++;
	else
		return nullptr;

	const CSphRowitem * pSrcRow = m_tIndex.m_tAttr.GetReadPtr() + m_iCurRow * m_iSrcStride;
	m_tCurDocID = DOCINFO2ID ( pSrcRow );
	const CSphRowitem * pAttrs = DOCINFO2ATTRS ( pSrcRow );

	m_dDstRow.Fill ( 0 );
	sphSetRowAttr ( m_dDstRow.Begin(), m_tLocID, m_tCurDocID );

	// iBlobAttr must advance in the same order the blob builder expects
	int iBlobAttr = 0;
	const CSphVector<CSphColumnInfo> & dAttrs = m_tIndex.m_dSchemaAttrs;
	ARRAY_FOREACH ( i, dAttrs )
	{
		const CSphColumnInfo & tColumnSrc = dAttrs[i];

		if ( tColumnSrc.m_eAttrType==SPH_ATTR_STRING || tColumnSrc.m_eAttrType==SPH_ATTR_JSON )
		{
			// string/JSON: the row stores an offset into .sps (0 means empty)
			const DWORD uOff = (DWORD)sphGetRowAttr ( pAttrs, tColumnSrc.m_tLocator );
			const BYTE * pStr = nullptr;
			int iLen = 0;
			if ( uOff )
				iLen = sphUnpackStr ( m_tIndex.m_tString.GetReadPtr() + uOff, &pStr );

			assert ( m_pBlob );
			m_pBlob->SetAttr( iBlobAttr++, (const BYTE*)pStr, iLen, m_sError );

		} else if ( tColumnSrc.m_eAttrType==SPH_ATTR_UINT32SET || tColumnSrc.m_eAttrType==SPH_ATTR_INT64SET )
		{
			DWORD uOff = (DWORD)sphGetRowAttr ( pAttrs, tColumnSrc.m_tLocator );
			const DWORD * pMva = nullptr;
			if ( uOff )
			{
				// arena flag means the value lives in the .mvp update stream
				if ( !m_tIndex.m_bArenaProhibit && ( uOff & MVA_ARENA_FLAG ) )
				{
					assert ( m_pMvaUpdates && m_pMvaUpdates<m_tIndex.m_tMvaArena.GetReadPtr() + m_tIndex.m_tMvaArena.GetLength64() );
					pMva = m_pMvaUpdates;
					int iCount = *m_pMvaUpdates;
					m_pMvaUpdates += iCount + 1; // advance past count + values
				} else
				{
					pMva = m_tIndex.m_tMva.GetReadPtr() + uOff;
				}
			}

			// first DWORD of the MVA chunk is the value count
			int iValues = 0;
			if ( pMva )
				iValues = *pMva++;

			if ( tColumnSrc.m_eAttrType==SPH_ATTR_UINT32SET )
			{
				// blob packer expect all types of MVA as int64
				m_dMVA.Resize ( iValues );
				for ( int iValue=0; iValue<iValues; iValue++ )
					m_dMVA[iValue] = pMva[iValue];

				pMva = (const DWORD *)m_dMVA.Begin();
			} else
			{
				// int64 MVAs store each value as two DWORDs
				iValues /= 2;
			}

			m_pBlob->SetAttr ( iBlobAttr++, (const BYTE*)pMva, iValues*sizeof(int64_t), m_sError );
		} else
		{
			// plain inline attribute: copy through the precomputed locator
			SphAttr_t tValue = sphGetRowAttr ( pAttrs, tColumnSrc.m_tLocator );
			const CSphAttrLocator & tDstLoc = m_dDstLoc[i];
			sphSetRowAttr ( m_dDstRow.Begin(), tDstLoc, tValue );
		}
	}

	return m_dDstRow.Begin();
}
// New-format doclist/skiplist offsets for a single legacy doclist; stored in
// m_hDoclist keyed by the old doclist offset found in the legacy dictionary.
struct DoclistOffsets_t
{
SphOffset_t m_uDoclist;
SphOffset_t m_uSkiplist;
};
// Dictionary checkpoint. For keyword dicts m_uWord is an offset into the
// packed keyword storage (m_dKeywordCheckpoints); for CRC dicts it is the
// wordid itself. m_uOffset is the dictionary file position of the entry.
struct Checkpoint_t
{
uint64_t m_uWord;
uint64_t m_uOffset;
};
// Converts one legacy (v2) plain index chunk into the current (v3) on-disk
// format: attributes, doclists, skiplists, dictionary, kill-list, dead row
// map and the index header.
struct ConverterPlain_t
{
bool Save ( const CSphVector<SphDocID_t> & dKilled, Index_t & tIndex, bool bIgnoreKlist, CSphString & sError );
const CSphSchema & GetSchema() const { return m_tSchema; }
// builds only the new-format schema (used for RT indexes without disk chunks)
bool ConvertSchema ( Index_t & tIndex, CSphString & sError );
private:
std::unique_ptr<ISphInfixBuilder> m_pInfixer; // set by Init() when infixes are enabled
CSphSchema m_tSchema; // new-format schema built by Init()
SphOffset_t m_tCheckpointsPosition = 0; // checkpoints offset inside the new .spi
SphOffset_t m_tDocinfoIndex = 0; // number of min/max blocks (excluding the index-wide pair)
SphOffset_t m_tMinMaxPos = 0; // byte offset of min/max rows inside the new .spa
int64_t m_iInfixBlockOffset = 0; // offset of infix blocks inside the new .spi
int m_iInfixCheckpointWordsSize = 0;
CSphVector<Checkpoint_t> m_dCheckpoints; // collected by ConvertDictionary()
CSphVector<BYTE> m_dKeywordCheckpoints; // packed checkpoint keywords (len byte + chars)
OpenHashTable_T<SphDocID_t, RowID_t> m_hDoc2Row; // old docid -> new rowid
OpenHashTable_T<SphOffset_t, DoclistOffsets_t> m_hDoclist; // old doclist offset -> new offsets
bool WriteLookup ( Index_t & tIndex, CSphString & sError );
bool WriteAttributes ( Index_t & tIndex, CSphString & sError );
void WriteCheckpoints ( const Index_t & tIndex, CSphWriter & tWriterDict );
bool WriteKillList ( const Index_t & tIndex, bool bIgnoreKlist, CSphString & sError );
void SaveHeader ( const Index_t & tIndex, DWORD uKillListSize ) const;
bool Init ( Index_t & tIndex, CSphString & sError );
bool ConvertDoclist ( Index_t & tIndex, CSphString & sError );
bool ConvertDictionary ( Index_t & tIndex, CSphString & sError );
};
// Re-reads the freshly written attribute file (.spa) to build the sorted
// docid->rowid lookup (.spt) and the per-attribute histograms (.sphi).
bool ConverterPlain_t::WriteLookup ( Index_t & tIndex, CSphString & sError )
{
CSphString sSPA = tIndex.GetFilename ( SPH_EXT_SPA );
CSphAutofile tSPA ( sSPA.cstr(), SPH_O_READ, sError );
if ( tSPA.GetFD()==-1 )
return false;
CSphReader tSPAReader;
tSPAReader.SetFile(tSPA);
// dPOD keeps the column info of every attribute that got a histogram, so
// dHistograms[i] and dPOD[i].m_tLocator stay index-aligned below
HistogramContainer_c tHistogramContainer;
CSphVector<Histogram_i *> dHistograms;
CSphVector<CSphColumnInfo> dPOD;
for ( int i = 0; i < m_tSchema.GetAttrsCount(); i++ )
{
const CSphColumnInfo & tAttr = m_tSchema.GetAttr(i);
std::unique_ptr<Histogram_i> pHistogram = CreateHistogram ( tAttr.m_sName, tAttr.m_eAttrType );
if ( pHistogram )
{
dHistograms.Add ( pHistogram.get() ); // raw borrow; ownership moves to the container
Verify ( tHistogramContainer.Add ( std::move ( pHistogram ) ) );
dPOD.Add ( tAttr );
}
}
int iStride = m_tSchema.GetRowSize();
CSphVector<CSphRowitem> dRow ( iStride );
CSphRowitem * pRow = dRow.Begin();
CSphFixedVector<DocidRowidPair_t> dDocidLookup ( tIndex.m_iTotalDocuments );
for ( RowID_t tRowID = 0; tRowID < tIndex.m_iTotalDocuments; tRowID++ )
{
tSPAReader.GetBytes ( pRow, iStride*sizeof(CSphRowitem) );
if ( tSPAReader.GetErrorFlag() )
{
sError = tSPAReader.GetErrorMessage();
return false;
}
ARRAY_FOREACH ( i, dHistograms )
dHistograms[i]->Insert ( sphGetRowAttr ( pRow, dPOD[i].m_tLocator ) );
dDocidLookup[tRowID].m_tDocID = sphGetDocID(pRow);
dDocidLookup[tRowID].m_tRowID = tRowID;
}
// the lookup is binary-searched by docid at query time, so sort it first
dDocidLookup.Sort ( CmpDocidLookup_fn() );
CSphString sSPT = tIndex.GetFilename ( SPH_EXT_SPT );
if ( !::WriteDocidLookup ( sSPT, dDocidLookup, sError ) )
return false;
CSphString sSPHI = tIndex.GetFilename ( SPH_EXT_SPHI );
if ( !tHistogramContainer.Save ( sSPHI, sError ) )
return false;
return true;
}
// Converts all attribute rows into the new .spa (fixed-size part) and .spb
// (blob part) files, collecting the min/max docinfo index and the
// docid->rowid map (m_hDoc2Row) along the way; finishes by writing the
// lookup and histogram files via WriteLookup().
bool ConverterPlain_t::WriteAttributes ( Index_t & tIndex, CSphString & sError )
{
CSphWriter tWriterSPA;
CSphString sSPA = tIndex.GetFilename ( SPH_EXT_SPA );
CSphString sSPB = tIndex.GetFilename ( SPH_EXT_SPB );
if ( !tWriterSPA.OpenFile ( sSPA, sError ) )
return false;
const CSphColumnInfo * pBlobLocatorAttr = m_tSchema.GetAttr ( sphGetBlobLocatorName() );
AttrIndexBuilder_c tMinMaxBuilder ( m_tSchema );
std::unique_ptr<BlobRowBuilder_i> pBlobRowBuilder;
// the blob builder is only needed when the schema has blob attributes
// (the blob locator attribute was added by CopyAndUpdateSchema in that case)
if ( pBlobLocatorAttr )
{
BuildBufferSettings_t tSettings; // use default buffer settings
pBlobRowBuilder = sphCreateBlobRowJsonBuilder ( m_tSchema, sSPB, tIndex.m_tSettings.m_tBlobUpdateSpace, tSettings.m_iBufferAttributes, sError );
if ( !pBlobRowBuilder )
return false;
}
RowID_t tNextRowID = 0;
int iStride = m_tSchema.GetRowSize();
AttrConverter_t tConv ( tIndex, m_tSchema, pBlobRowBuilder.get() );
CSphRowitem * pRow = nullptr;
while ( ( pRow = tConv.NextRow() )!=nullptr )
{
// Flush() returns the blob row offset; store it in the locator attribute
if ( pBlobLocatorAttr )
sphSetRowAttr ( pRow, pBlobLocatorAttr->m_tLocator, pBlobRowBuilder->Flush().first );
tMinMaxBuilder.Collect ( pRow );
tWriterSPA.PutBytes ( pRow, iStride*sizeof(CSphRowitem) );
SphDocID_t uDocid = tConv.GetRowDocid();
// new format keeps docids as signed 64-bit; oversized legacy docids are
// only kept (wrapped) when explicitly allowed via g_bLargeDocid
if ( uDocid>INT64_MAX )
{
sError.SetSprintf ( "unable to convert document ID " UINT64_FMT " greater than " INT64_FMT " at row %d", uDocid, INT64_MAX, tNextRowID );
if ( !g_bLargeDocid )
return false;
else
{
sphWarning ( "%s, wrapping value", sError.cstr() );
sError = "";
}
}
m_hDoc2Row.Acquire ( uDocid ) = tNextRowID;
tNextRowID++;
}
if ( pBlobRowBuilder && !pBlobRowBuilder->Done ( sError ) )
return false;
// append the min/max (docinfo index) rows after the plain rows
tMinMaxBuilder.FinishCollect();
const CSphTightVector<CSphRowitem> & dMinMaxRows = tMinMaxBuilder.GetCollected();
m_tMinMaxPos = tWriterSPA.GetPos();
tWriterSPA.PutBytes ( dMinMaxRows.Begin(), dMinMaxRows.GetLength()*sizeof(CSphRowitem) );
tWriterSPA.CloseFile();
tIndex.m_iTotalDocuments = tNextRowID;
// min/max rows come in pairs per block; subtract the index-wide pair
m_tDocinfoIndex = ( dMinMaxRows.GetLength() / m_tSchema.GetRowSize() / 2 ) - 1;
if ( !WriteLookup ( tIndex, sError ) )
return false;
return true;
}
bool ConverterPlain_t::ConvertDoclist ( Index_t & tIndex, CSphString & sError )
{
	// Rewrites the legacy doclist (.spd) into the new format and emits the
	// external skiplist file (.spe). Legacy doclists are docid-delta coded;
	// new ones carry rowid deltas, so every docid is remapped via m_hDoc2Row.
	// Documents that did not survive attribute conversion are dropped (their
	// hit data is still read to keep the decoder in sync, just not re-emitted).
	// For every legacy doclist start offset the new doclist/skiplist offsets
	// are recorded in m_hDoclist; ConvertDictionary() patches the dictionary
	// entries from that map.
	CSphWriter tWriterDocs, tWriterSkips;
	if ( !tWriterDocs.OpenFile ( tIndex.GetFilename ( SPH_EXT_SPD ), sError ) )
		return false;
	if ( !tWriterSkips.OpenFile ( tIndex.GetFilename ( SPH_EXT_SPE ), sError ) )
		return false;

	CSphAutoreader & tDoclist = tIndex.m_tDoclistFile;
	tDoclist.SeekTo ( 1, 0 );	// skip the leading format marker byte
	tWriterDocs.PutByte(1);
	tWriterSkips.PutByte(1);

	const SphOffset_t uDoclistEnd = tDoclist.GetFilesize();
	const bool bInlineHits = ( tIndex.m_tSettings.m_eHitFormat==SPH_HIT_FORMAT_INLINE );

	CSphVector<SkiplistEntry_t> dSkiplist;
	m_hDoclist.Reset ( tIndex.m_iDocinfo );
	DWORD uSkiplistBlock = tIndex.m_tSettings.m_iSkiplistBlockSize;

	while ( uDoclistEnd!=tDoclist.GetPos() )
	{
		const SphOffset_t uOldDoclist = tDoclist.GetPos();
		const SphOffset_t uNewDoclist = tWriterDocs.GetPos();

		SphDocID_t uOldDocid = tIndex.m_uMinDocid;	// legacy docids are delta-coded off the min docid
		SphDocID_t uDelta = 0;
		SphOffset_t uLastHitpos = 0;
		RowID_t tLastRowID = INVALID_ROWID;
		RowID_t tSkiplistRowID = INVALID_ROWID;
		int iDocs = 0;
		dSkiplist.Resize(0);

		while ( true )
		{
			uDelta = tDoclist.UnzipOffset();
			if ( !uDelta )
			{
				// a zero delta terminates the doclist
				tWriterDocs.ZipOffset ( 0 );
				break;
			}

			uOldDocid += uDelta;
			const RowID_t * pRow = m_hDoc2Row.Find ( uOldDocid );
			if ( pRow )
			{
				// build skiplist, aka save decoder state as needed
				if ( ( iDocs & ( uSkiplistBlock-1 ) )==0 )
				{
					SkiplistEntry_t & t = dSkiplist.Add();
					t.m_tBaseRowIDPlus1 = tSkiplistRowID+1;
					t.m_iOffset = tWriterDocs.GetPos();
					t.m_iBaseHitlistPos = uLastHitpos;
				}

				tWriterDocs.ZipOffset ( *pRow - tLastRowID );
				tLastRowID = *pRow;
				tSkiplistRowID = *pRow;
				iDocs++;
			}

			// hit data is always decoded to advance the reader, but only
			// re-emitted for documents that survived (pRow!=nullptr)
			if ( bInlineHits )
			{
				const DWORD uMatchHits = tDoclist.UnzipInt();
				const DWORD uFirst = tDoclist.UnzipInt();
				if ( pRow )
				{
					tWriterDocs.ZipInt ( uMatchHits );
					tWriterDocs.ZipInt ( uFirst );
				}

				if ( uMatchHits==1 )
				{
					const DWORD uField = tDoclist.UnzipInt();
					if ( pRow )
						tWriterDocs.ZipInt ( uField );
				} else
				{
					const SphOffset_t uHitPosDelta = tDoclist.UnzipOffset();
					assert ( uHitPosDelta>=0 );
					uLastHitpos += uHitPosDelta;
					if ( pRow )
						tWriterDocs.ZipOffset ( uHitPosDelta );
				}
			} else
			{
				const SphOffset_t uHitPosDelta = tDoclist.UnzipOffset();
				assert ( uHitPosDelta>=0 );
				const DWORD uMatchHits = tDoclist.UnzipInt();
				uLastHitpos += uHitPosDelta;
				if ( pRow )
				{
					tWriterDocs.ZipOffset ( uHitPosDelta );
					tWriterDocs.ZipInt ( uMatchHits );
				}
			}
		}

		// write skiplist
		// FIXED: the writer position used to be narrowed through an (int)
		// cast, which corrupted stored skiplist offsets once the .spe file
		// grew past 2GB; keep the full 64-bit offset instead
		SphOffset_t uSkip = tWriterSkips.GetPos();
		for ( int i=1; i<dSkiplist.GetLength(); i++ )
		{
			const SkiplistEntry_t & tPrev = dSkiplist[i-1];
			const SkiplistEntry_t & tCur = dSkiplist[i];
			assert ( tCur.m_tBaseRowIDPlus1 - tPrev.m_tBaseRowIDPlus1>=uSkiplistBlock );
			assert ( tCur.m_iOffset - tPrev.m_iOffset>=4*uSkiplistBlock );
			tWriterSkips.ZipInt ( tCur.m_tBaseRowIDPlus1 - tPrev.m_tBaseRowIDPlus1 - uSkiplistBlock );
			tWriterSkips.ZipOffset ( tCur.m_iOffset - tPrev.m_iOffset - 4*uSkiplistBlock );
			tWriterSkips.ZipOffset ( tCur.m_iBaseHitlistPos - tPrev.m_iBaseHitlistPos );
		}

		DoclistOffsets_t tOffsets;
		tOffsets.m_uDoclist = uNewDoclist;
		tOffsets.m_uSkiplist = uSkip;
		m_hDoclist.Add ( uOldDoclist, tOffsets );
	}

	return true;
}
// Rewrites the legacy dictionary (.spi) to the new format. Doclist offsets
// stored in the dictionary are remapped through m_hDoclist (built earlier by
// ConvertDoclist), checkpoints are collected into m_dCheckpoints, and infix
// entries are fed to m_pInfixer when infixing is enabled. Handles both
// keyword dictionaries and CRC (wordid) dictionaries.
bool ConverterPlain_t::ConvertDictionary ( Index_t & tIndex, CSphString & sError )
{
CSphAutoreader tReaderDict;
if ( !tReaderDict.Open ( GetIndexFileName ( tIndex.m_sPath, "spi" ).cstr(), sError ) )
return false;
tReaderDict.SeekTo ( 1, 0 );
CSphWriter tWriterDict;
CSphString sDictName = tIndex.GetFilename ( SPH_EXT_SPI );
tWriterDict.OpenFile ( sDictName, sError );
tWriterDict.PutByte ( 1 );
const SphOffset_t iEndDict = tIndex.m_tWordlist.m_iWordsEnd;
const bool bWordDict = tIndex.m_tDictSettings.m_bWordDict;
const bool bHasMorphology = tIndex.m_pDict->HasMorphology();
// decoder state, reset at every checkpoint boundary
SphOffset_t uDoclistOffCurLast = 0;
SphOffset_t uDoclistOffNewLast = 0;
SphWordID_t uWordid = 0;
int iWords = 0;
BYTE sDictWord[MAX_KEYWORD_BYTES+1];
while ( tReaderDict.GetPos()<iEndDict )
{
SphOffset_t uDictPos = tWriterDict.GetPos();
SphWordID_t iDeltaWord = 0;
// keyword dicts prefix entries with a pack byte; CRC dicts with a wordid delta
if ( bWordDict )
iDeltaWord = tReaderDict.GetByte();
else
iDeltaWord = tReaderDict.UnzipWordid();
// checkpoint encountered, handle it
if ( !iDeltaWord )
{
// NOTE(review): UnzipOffset() is narrowed into an int here; assumes the
// post-checkpoint offset delta never exceeds 2G -- confirm
int iDeltaDocs = tReaderDict.UnzipOffset();
uWordid = 0;
uDoclistOffCurLast = 0;
uDoclistOffNewLast = 0;
if ( bWordDict )
tWriterDict.PutByte ( 0 );
else
tWriterDict.ZipOffset ( 0 );
tWriterDict.ZipOffset ( iDeltaDocs );
continue;
}
int iDocs = 0;
int iHits = 0;
DoclistOffsets_t * pOff = nullptr;
// re-emit the entry prefix unchanged
if ( bWordDict )
tWriterDict.PutByte ( iDeltaWord );
else
tWriterDict.ZipOffset ( iDeltaWord );
if ( bWordDict )
{
// unpack next word
// must be in sync with DictEnd()!
BYTE uPack = (BYTE)iDeltaWord;
int iMatch, iDelta;
if ( uPack & 0x80 )
{
// short form: delta (3 bits) and matched prefix len (4 bits) in one byte
iDelta = ( ( uPack>>4 ) & 7 ) + 1;
iMatch = uPack & 15;
} else
{
// long form: delta in the pack byte, prefix len in the next byte
iDelta = uPack & 127;
iMatch = tReaderDict.GetByte();
tWriterDict.PutByte ( iMatch );
}
// sDictWord is pascal-style: [0] holds the length, chars start at [1]
tReaderDict.GetBytes ( sDictWord+1+iMatch, iDelta );
sDictWord [ iMatch+iDelta+1 ] = '\0';
sDictWord[0] = iMatch+iDelta;
tWriterDict.PutBytes ( sDictWord+1+iMatch, iDelta );
// remap the doclist offset to its new-format position
SphOffset_t uDoclistOffset = tReaderDict.UnzipOffset();
pOff = m_hDoclist.Find ( uDoclistOffset );
assert ( pOff );
tWriterDict.ZipOffset ( pOff->m_uDoclist );
iDocs = tReaderDict.UnzipInt();
iHits = tReaderDict.UnzipInt();
tWriterDict.ZipInt ( iDocs );
tWriterDict.ZipInt ( iHits );
// big doclists carry an extra size-hint byte
if ( iDocs>=DOCLIST_HINT_THRESH )
{
BYTE uHint = tReaderDict.GetByte();
tWriterDict.PutByte ( uHint );
}
// build infixes
if ( m_pInfixer )
m_pInfixer->AddWord ( sDictWord+1, sDictWord[0], m_dCheckpoints.GetLength(), bHasMorphology );
} else
{
// finish reading the entire entry
// CRC dict: doclist offsets are delta-coded within a checkpoint span,
// so decode against the old base and re-encode against the new one
uWordid = uWordid + iDeltaWord;
uDoclistOffCurLast = uDoclistOffCurLast + tReaderDict.UnzipOffset();
iDocs = tReaderDict.UnzipInt();
iHits = tReaderDict.UnzipInt();
pOff = m_hDoclist.Find ( uDoclistOffCurLast );
assert ( pOff );
assert ( pOff->m_uDoclist>uDoclistOffNewLast );
SphOffset_t uOffDelta = pOff->m_uDoclist - uDoclistOffNewLast;
uDoclistOffNewLast = pOff->m_uDoclist;
tWriterDict.ZipOffset ( uOffDelta );
tWriterDict.ZipInt ( iDocs );
tWriterDict.ZipInt ( iHits );
}
// skiplist
// old format stored an inline skiplist pointer past SPH_SKIPLIST_BLOCK docs
// (skipped here); new format stores the external .spe offset instead, using
// the configured skiplist block size
if ( iDocs>(int)SPH_SKIPLIST_BLOCK )
tReaderDict.UnzipInt();
if ( iDocs>tIndex.m_tSettings.m_iSkiplistBlockSize )
tWriterDict.ZipInt ( pOff->m_uSkiplist );
if ( ( iWords%SPH_WORDLIST_CHECKPOINT )==0 )
{
// begin new wordlist entry
Checkpoint_t & tCP = m_dCheckpoints.Add();
if ( bWordDict )
{
tCP.m_uOffset = uDictPos;
tCP.m_uWord = sphPutBytes ( &m_dKeywordCheckpoints, sDictWord, sDictWord[0]+1 ); // copy word len + word itself to checkpoint storage
} else
{
tCP.m_uOffset = uDictPos;
tCP.m_uWord = uWordid;
}
}
iWords++;
}
WriteCheckpoints ( tIndex, tWriterDict );
return true;
}
// Appends infix entries, the wordlist checkpoints and (for keyword dicts) a
// small trailing header to the new dictionary file; records checkpoint and
// infix-block positions for the index-wide header written later.
void ConverterPlain_t::WriteCheckpoints ( const Index_t & tIndex, CSphWriter & tWriterDict )
{
const bool bKeywordDict = tIndex.m_tDictSettings.m_bWordDict;
// flush infix hash entries, if any
if ( m_pInfixer )
m_pInfixer->SaveEntries ( tWriterDict );
m_tCheckpointsPosition = tWriterDict.GetPos();
if ( bKeywordDict )
{
// keyword checkpoints are stored as (len, chars, offset) tuples; m_uWord
// indexes into the packed keyword storage filled by ConvertDictionary()
const char * pCheckpoints = (const char *)m_dKeywordCheckpoints.Begin();
for ( const auto & i : m_dCheckpoints )
{
const char * pPacked = pCheckpoints + i.m_uWord;
int iLen = *pPacked;
assert ( iLen && (int)i.m_uWord+1+iLen<=m_dKeywordCheckpoints.GetLength() );
tWriterDict.PutDword ( iLen );
tWriterDict.PutBytes ( pPacked+1, iLen );
tWriterDict.PutOffset ( i.m_uOffset );
}
} else
{
// CRC dict checkpoints are plain (wordid, offset) pairs
for ( const auto & i : m_dCheckpoints )
{
tWriterDict.PutOffset ( i.m_uWord );
tWriterDict.PutOffset ( i.m_uOffset );
}
}
// flush infix hash blocks
if ( m_pInfixer )
{
m_iInfixBlockOffset = m_pInfixer->SaveEntryBlocks ( tWriterDict );
m_iInfixCheckpointWordsSize = m_pInfixer->GetBlocksWordsSize();
// the offset is later stored as a DWORD, so values over 4G are unrepresentable
if ( m_iInfixBlockOffset>UINT_MAX )
sphWarning ( "INTERNAL ERROR: dictionary size " INT64_FMT " overflow at infix save", m_iInfixBlockOffset );
}
// flush header
// mostly for debugging convenience
// primary storage is in the index wide header
if ( bKeywordDict )
{
tWriterDict.PutBlob ( g_sTagDictHeader );
tWriterDict.ZipInt ( m_dCheckpoints.GetLength() );
tWriterDict.ZipOffset ( m_tCheckpointsPosition );
tWriterDict.ZipInt ( tIndex.m_pTokenizer->GetMaxCodepointLength() );
tWriterDict.ZipInt ( (DWORD)m_iInfixBlockOffset );
}
}
bool ConverterPlain_t::Save ( const CSphVector<SphDocID_t> & dKilled, Index_t & tIndex, bool bIgnoreKlist, CSphString & sError )
{
if ( !Init ( tIndex, sError ) )
return false;
if ( !WriteAttributes ( tIndex, sError ) )
return false;
if ( !ConvertDoclist ( tIndex, sError ) )
return false;
if ( !ConvertDictionary ( tIndex, sError ) )
return false;
if ( !WriteKillList ( tIndex, bIgnoreKlist, sError ) )
return false;
// dead row map save
CSphBitvec dRowmap ( tIndex.m_iTotalDocuments );
ARRAY_FOREACH ( i, dKilled )
{
RowID_t * pRow = m_hDoc2Row.Find ( dKilled[i] );
if ( pRow )
dRowmap.BitSet ( *pRow );
}
CSphString sRowMapName = tIndex.GetFilename ( SPH_EXT_SPM );
CSphWriter tRowMapWriter;
if ( !tRowMapWriter.OpenFile ( sRowMapName, sError ) )
return false;
tRowMapWriter.PutBytes ( dRowmap.Begin(), dRowmap.GetSizeBytes() );
tRowMapWriter.CloseFile();
SaveHeader ( tIndex, 0 );
return true;
}
// Writes the new-format index header (.sph). Field order here must match the
// current-version header reader exactly.
// NOTE(review): write errors are not checked (sError is local and discarded);
// confirm callers verify the resulting header afterwards.
void ConverterPlain_t::SaveHeader ( const Index_t & tIndex, DWORD uKillListSize ) const
{
CSphWriter tWriter;
CSphString sError;
tWriter.OpenFile ( tIndex.GetFilename ( SPH_EXT_SPH ), sError );
// format
tWriter.PutDword ( INDEX_MAGIC_HEADER );
tWriter.PutDword ( INDEX_FORMAT_VERSION );
// schema
WriteSchema ( tWriter, m_tSchema );
// wordlist checkpoints
tWriter.PutOffset ( m_tCheckpointsPosition );
tWriter.PutDword ( m_dCheckpoints.GetLength() );
int iInfixCodepointBytes = ( tIndex.m_tSettings.m_iMinInfixLen && tIndex.m_pDict->GetSettings().m_bWordDict ? tIndex.m_pTokenizer->GetMaxCodepointLength() : 0 );
tWriter.PutByte ( iInfixCodepointBytes ); // m_iInfixCodepointBytes, v.27+
tWriter.PutDword ( m_iInfixBlockOffset ); // m_iInfixBlocksOffset, v.27+
tWriter.PutDword ( m_iInfixCheckpointWordsSize ); // m_iInfixCheckpointWordsSize, v.34+
// stats
tWriter.PutDword ( tIndex.m_iTotalDocuments );
tWriter.PutOffset ( tIndex.m_iTotalBytes );
// index settings
tWriter.PutDword ( tIndex.m_tSettings.RawMinPrefixLen() );
tWriter.PutDword ( tIndex.m_tSettings.m_iMinInfixLen );
tWriter.PutDword ( tIndex.m_tSettings.m_iMaxSubstringLen );
tWriter.PutByte ( tIndex.m_tSettings.m_bHtmlStrip ? 1 : 0 );
tWriter.PutString ( tIndex.m_tSettings.m_sHtmlIndexAttrs.cstr () );
tWriter.PutString ( tIndex.m_tSettings.m_sHtmlRemoveElements.cstr () );
tWriter.PutByte ( tIndex.m_tSettings.m_bIndexExactWords ? 1 : 0 );
tWriter.PutDword ( tIndex.m_tSettings.m_eHitless );
// hit format is always written as inline in the converted index
tWriter.PutDword ( SPH_HIT_FORMAT_INLINE );
tWriter.PutByte ( tIndex.m_tSettings.m_bIndexSP ? 1 : 0 );
tWriter.PutString ( tIndex.m_tSettings.m_sZones );
tWriter.PutDword ( tIndex.m_tSettings.m_iBoundaryStep );
tWriter.PutDword ( tIndex.m_tSettings.m_iStopwordStep );
tWriter.PutDword ( tIndex.m_tSettings.m_iOvershortStep );
tWriter.PutDword ( tIndex.m_tSettings.m_iEmbeddedLimit );
tWriter.PutByte ( tIndex.m_tSettings.m_eBigramIndex );
tWriter.PutString ( tIndex.m_tSettings.m_sBigramWords );
tWriter.PutByte ( tIndex.m_tSettings.m_bIndexFieldLens );
tWriter.PutByte ( tIndex.m_tSettings.m_ePreprocessor==Preprocessor_e::ICU ? 1 : 0 );
tWriter.PutString(""); // was: rlp context
tWriter.PutString ( tIndex.m_tSettings.m_sIndexTokenFilter );
tWriter.PutOffset ( tIndex.m_tSettings.m_tBlobUpdateSpace );
tWriter.PutDword ( tIndex.m_tSettings.m_iSkiplistBlockSize );
tWriter.PutString ( "" ); // tSettings.m_sHitlessFiles
// tokenizer
SaveTokenizerSettings ( tWriter, tIndex.m_pTokenizer, tIndex.m_tSettings.m_iEmbeddedLimit );
// dictionary
SaveDictionarySettings ( tWriter, tIndex.m_pDict, tIndex.m_pDict->GetSettings().m_bWordDict, tIndex.m_tSettings.m_iEmbeddedLimit );
tWriter.PutOffset ( tIndex.m_iTotalDocuments );
tWriter.PutOffset ( m_tDocinfoIndex );
// min/max position is stored in rowitems, not bytes
tWriter.PutOffset ( m_tMinMaxPos/sizeof(CSphRowitem) );
// field filter
tIndex.m_tFieldFilterSettings.Save(tWriter);
// field lengths
if ( tIndex.m_tSettings.m_bIndexFieldLens )
for ( int i=0; i <m_tSchema.GetFieldsCount(); i++ )
tWriter.PutOffset ( tIndex.m_dFieldLens[i] );
// done
tWriter.CloseFile ();
}
// Rebuilds a legacy schema into the new-format layout: fields are copied
// as-is, a bigint "id" attribute is prepended, and (when blob attributes are
// present) a blob locator attribute is inserted right after the docid.
static void CopyAndUpdateSchema ( const Index_t & tIndex, CSphSchema & tSchema )
{
ARRAY_FOREACH ( i, tIndex.m_dSchemaFields )
tSchema.AddField ( tIndex.m_dSchemaFields[i] );
// docid becomes a regular first attribute in the new format
CSphColumnInfo tCol ( sphGetDocidName() );
tCol.m_eAttrType = SPH_ATTR_BIGINT;
tSchema.InsertAttr ( 0, tCol, false );
ARRAY_FOREACH ( i, tIndex.m_dSchemaAttrs )
tSchema.AddAttr ( tIndex.m_dSchemaAttrs[i], false );
if ( tSchema.HasBlobAttrs() )
{
CSphColumnInfo tBlobLocatorCol ( sphGetBlobLocatorName() );
tBlobLocatorCol.m_eAttrType = SPH_ATTR_BIGINT;
// should be right after docid
tSchema.InsertAttr ( 1, tBlobLocatorCol, false );
// rebuild locators in the schema
// adding and immediately removing a dummy attribute forces the schema to
// recompute all attribute locators after the insert above
const char * szTmpColName = "$_tmp";
CSphColumnInfo tColTmp ( szTmpColName, SPH_ATTR_BIGINT );
tSchema.AddAttr ( tColTmp, false );
tSchema.RemoveAttr ( szTmpColName, false );
}
}
bool ConverterPlain_t::Init ( Index_t & tIndex, CSphString & sError )
{
	// Pulls current-version defaults for settings that did not exist in the
	// legacy format, builds the new-format schema and sets up the infix
	// builder when the dictionary configuration requires one.
	CSphConfigSection tEmptySection;
	CSphIndexSettings tFreshDefaults;
	CSphString sSetupWarning;
	if ( !tFreshDefaults.Setup ( tEmptySection, tIndex.m_sName.cstr(), sSetupWarning, sError ) )
		return false;

	if ( !sSetupWarning.IsEmpty() )
		sphWarning ( "%s", sSetupWarning.cstr() );

	// these two settings appeared after the legacy format; take the defaults
	tIndex.m_tSettings.m_tBlobUpdateSpace = tFreshDefaults.m_tBlobUpdateSpace;
	tIndex.m_tSettings.m_iSkiplistBlockSize = tFreshDefaults.m_iSkiplistBlockSize;

	// old schema to new schema
	CopyAndUpdateSchema ( tIndex, m_tSchema );

	const bool bNeedInfixes = tIndex.m_tSettings.m_iMinInfixLen!=0 && tIndex.m_pDict->GetSettings().m_bWordDict;
	if ( bNeedInfixes )
		m_pInfixer = sphCreateInfixBuilder ( tIndex.m_pTokenizer->GetMaxCodepointLength(), &sError );

	return ( sError.IsEmpty() );
}
bool ConverterPlain_t::ConvertSchema ( Index_t & tIndex, CSphString & sError )
{
	// Builds only the new-format schema and stores it back into the index
	// descriptor (used for RT indexes that have no disk chunks to convert).
	if ( !Init ( tIndex, sError ) )
		return false;

	tIndex.m_tSchema = m_tSchema;
	return true;
}
bool ConverterPlain_t::WriteKillList ( const Index_t & tIndex, bool bIgnoreKlist, CSphString & sError )
{
CSphVector<DocID_t> dKillList;
if ( !bIgnoreKlist )
{
dKillList.Resize ( tIndex.m_tKillList.GetLength () );
ARRAY_FOREACH ( i, dKillList )
dKillList[i] = tIndex.m_tKillList.GetReadPtr()[i];
}
CSphString sName = tIndex.GetFilename ( SPH_EXT_SPK );
if ( !::WriteKillList ( sName, dKillList.Begin(), dKillList.GetLength(), tIndex.m_tKlistTargets, sError ) )
return false;
WarnAboutKillList ( dKillList, tIndex.m_tKlistTargets );
return true;
}
// hitlist (spp file) is same
// legacy (v2) index file extensions, in the order used for renaming;
// the trailing .mvp (MVA updates pool) may legitimately be absent
static const char * g_dExtsOld[] = { ".sph", ".spa", ".spi", ".spd", ".spm", ".spk", ".sps", ".spe", ".mvp" };
// a single (already prefixed) index file extension and whether it may be missing
struct ExtInfo_t
{
CSphString m_sExt;
bool m_bOptional;
};
static CSphVector<ExtInfo_t> GetExts ( const char * szPrefix, bool bOldFormat )
{
	// Enumerates index file extensions (each prefixed with szPrefix) for
	// either the legacy on-disk format or the current one. Hitlists (.spp)
	// are excluded: their format did not change and they are copied verbatim.
	CSphVector<ExtInfo_t> dCollected;
	if ( bOldFormat )
	{
		const int iOldExts = int ( sizeof(g_dExtsOld) / sizeof ( g_dExtsOld[0] ) );
		for ( int iExt = 0; iExt<iOldExts; iExt++ )
		{
			ExtInfo_t & tNew = dCollected.Add();
			tNew.m_sExt.SetSprintf ( "%s%s", szPrefix, g_dExtsOld[iExt] );
			tNew.m_bOptional = ( iExt==iOldExts-1 ); // the trailing MVP is optional
		}
	} else
	{
		auto dKnown = sphGetExts();
		for ( const auto & tCur : dKnown )
		{
			if ( tCur.m_eExt==SPH_EXT_SPP )
				continue;

			ExtInfo_t & tNew = dCollected.Add();
			tNew.m_sExt.SetSprintf ( "%s%s", szPrefix, tCur.m_szExt );
			tNew.m_bOptional = tCur.m_bOptional;
		}
	}
	return dCollected;
}
static bool TryRename ( const char * sPrefix, const char * sFromPostfix, const char * sToPostfix, const char * sAction, CSphString & sError )
{
char sFrom [ SPH_MAX_FILENAME_LEN ];
char sTo [ SPH_MAX_FILENAME_LEN ];
snprintf ( sFrom, sizeof(sFrom), "%s%s", sPrefix, sFromPostfix );
snprintf ( sTo, sizeof(sTo), "%s%s", sPrefix, sToPostfix );
#if _WIN32
::unlink ( sTo );
#endif
if ( rename ( sFrom, sTo ) )
{
sError.SetSprintf ( "%s: rename '%s' to '%s' failed: %s", sAction, sFrom, sTo, strerror(errno) );
return false;
}
return true;
}
static void RollbackRename ( const CSphBitvec & dProcessed, const CSphString & sPath, const CSphVector<ExtInfo_t> & dCur, const CSphVector<ExtInfo_t> & dTo, CSphString & sError )
{
	// Undoes a partially completed RenameIndex(): every file flagged in
	// dProcessed has already been renamed to its dCur extension, so rename it
	// back to the matching dTo extension. Failures are reported via sError
	// but do not stop the rollback of the remaining files.
	// (Removed an unused sFilename local that was built but never read.)
	for ( int i=0; i<dCur.GetLength(); i++ )
	{
		if ( !dProcessed.BitGet ( i ) )
			continue;

		const char * sCurExt = dCur[i].m_sExt.cstr();
		const char * sToExt = dTo[i].m_sExt.cstr();
		TryRename ( sPath.cstr(), sCurExt, sToExt, "rollback", sError );
	}
}
// Renames a full set of index files from szPrefix1-decorated extensions to
// szPrefix2-decorated ones. On any failure every file renamed so far is
// rolled back and false is returned. Missing source files are tolerated only
// when the extension is marked optional.
static bool RenameIndex ( const CSphString & sPath, const char * sAction, bool bOldFormat, const char * szPrefix1, const char * szPrefix2, CSphString & sError )
{
CSphVector<ExtInfo_t> dNextExts = GetExts ( szPrefix2, bOldFormat );
CSphVector<ExtInfo_t> dCurExts = GetExts ( szPrefix1, bOldFormat );
assert ( dCurExts.GetLength()==dNextExts.GetLength() );
bool bError = false;
CSphString sFilename;
CSphBitvec dProcessed ( dCurExts.GetLength() ); // which renames to undo on error
for ( int i=0; i<dCurExts.GetLength(); i++ )
{
const char * sCurExt = dCurExts[i].m_sExt.cstr();
const char * sNextExt = dNextExts[i].m_sExt.cstr();
sFilename.SetSprintf ( "%s%s", sPath.cstr(), sCurExt );
if ( sphIsReadable ( sFilename.cstr() ) )
{
if ( !TryRename ( sPath.cstr(), sCurExt, sNextExt, sAction, sError ) )
{
bError = true;
break;
}
dProcessed.BitSet(i);
}
else if ( !dCurExts[i].m_bOptional )
{
// mandatory source file is missing
// NOTE(review): sError is not set on this path -- callers get false with
// whatever was in sError before; confirm that is intended
bError = true;
break;
}
}
if ( bError )
{
// undo everything renamed so far (from dNextExts back to dCurExts)
RollbackRename ( dProcessed, sPath, dNextExts, dCurExts, sError );
return false;
}
return true;
}
static bool RotateIndexFiles ( const CSphString & sPathIn, const CSphString & sPathOut, CSphString & sError )
{
	// Two-step in-place rotation: demote the existing legacy files to ".old",
	// then promote the freshly converted ".new" files to the live names.
	assert ( sPathIn==sPathOut );

	const bool bDemoted = RenameIndex ( sPathIn, "cur2old", true, "", ".old", sError );
	if ( !bDemoted )
		return false;

	return RenameIndex ( sPathOut, "new2cur", false, ".new", "", sError );
}
// The hitlist format did not change between v2 and v3, so the .spp file is
// copied byte-for-byte in chunks of up to 4KB.
static bool CopyHitlist ( const CSphString & sPathIn, const CSphString & sPathOut, CSphString & sError )
{
CSphString sFrom;
CSphString sTo;
sFrom.SetSprintf ( "%s.spp", sPathIn.cstr() );
sTo.SetSprintf ( "%s.spp", sPathOut.cstr() );
CSphAutoreader tFromHit;
if ( !tFromHit.Open ( sFrom, sError ) )
return false;
CSphWriter tToHit;
if ( !tToHit.OpenFile ( sTo, sError ) )
return false;
int64_t iSize = tFromHit.GetFilesize();
// buffer is capped at 4KB (or the whole file when it is smaller)
CSphFixedVector<BYTE> dBuf ( Min ( iSize, 4096 ) );
while ( iSize )
{
int iReadSize = Min ( iSize, dBuf.GetLength() );
tFromHit.GetBytes ( dBuf.Begin(), iReadSize );
tToHit.PutBytes ( dBuf.Begin(), iReadSize );
if ( tFromHit.GetErrorFlag() )
{
sError = tFromHit.GetErrorMessage();
return false;
}
if ( tToHit.IsError() ) // sError already set as error buffer at writer
return false;
iSize -= iReadSize;
}
return true;
}
// Loads a legacy RT index: parses the versioned .meta file (schema, settings,
// tokenizer/dictionary, chunk list) and, when present, the .ram chunk header.
// Conversion is refused for already-converted indexes (meta v.14+) and for
// RAM chunks holding unsaved segments.
static bool LoadRtIndex ( Index_t & tIndex, CSphString & sError )
{
// load meta
CSphString sMetaName = GetIndexFileName ( tIndex.m_sPath, "meta" );
CSphAutoreader rdMeta;
if ( !rdMeta.Open ( sMetaName.cstr(), sError ) )
return false;
if ( rdMeta.GetDword()!=META_HEADER_MAGIC )
{
sError.SetSprintf ( "invalid meta file %s", sMetaName.cstr() );
return false;
}
DWORD uVersion = rdMeta.GetDword();
// only legacy meta versions 6..13 can be converted
if ( uVersion==0 || uVersion>META_VERSION || uVersion<6 )
{
sError.SetSprintf ( "%s is v.%d, binary is v.%d", sMetaName.cstr(), uVersion, META_VERSION );
return false;
}
if ( uVersion>13 )
{
sError.SetSprintf ( "already a v3 table; nothing to do" );
return false;
}
const int iDiskChunks = rdMeta.GetDword();
int iDiskBase = rdMeta.GetDword();
tIndex.m_iTotalDocuments = rdMeta.GetDword();
tIndex.m_iTotalBytes = rdMeta.GetOffset();
tIndex.m_iTID = rdMeta.GetOffset();
CSphEmbeddedFiles tEmbeddedFiles;
CSphString sWarning;
// load index settings
DWORD uSettingsVer = rdMeta.GetDword();
ReadSchema ( rdMeta, tIndex.m_dSchemaFields, tIndex.m_dSchemaAttrs );
LoadIndexSettings ( tIndex.m_tSettings, rdMeta, uSettingsVer );
if ( !LoadTokenizerSettings ( rdMeta, tIndex.m_tTokSettings, tEmbeddedFiles, uSettingsVer, sError ) )
return false;
if ( tIndex.m_bStripPath )
StripPath ( tIndex.m_tTokSettings.m_sSynonymsFile );
LoadDictionarySettings ( rdMeta, tIndex.m_tDictSettings, tEmbeddedFiles, uSettingsVer, sWarning );
if ( !sWarning.IsEmpty() )
{
sphWarning ( "%s", sWarning.cstr() );
sWarning = "";
}
if ( tIndex.m_bStripPath )
{
StripPath ( tIndex.m_tDictSettings.m_sStopwords );
ARRAY_FOREACH ( i, tIndex.m_tDictSettings.m_dWordforms )
StripPath ( tIndex.m_tDictSettings.m_dWordforms[i] );
}
if ( !SetupWordProcessors ( tIndex, sError ) )
return false;
// meta v.5 checkpoint freq
tIndex.m_iWordsCheckpoint = rdMeta.GetDword();
// check that infixes definition changed - going to rebuild infixes
if ( uVersion>=7 )
{
tIndex.m_iMaxCodepointLength = rdMeta.GetDword();
rdMeta.GetByte(); // iBloomKeyLen
rdMeta.GetByte(); // iBloomHashesCount
}
if ( uVersion>=11 )
legacy::LoadFieldFilterSettings ( rdMeta, tIndex.m_tFieldFilterSettings );
if ( uVersion>=12 )
{
int iLen = (int)rdMeta.GetDword();
tIndex.m_dRtChunkNames.Reset ( iLen );
rdMeta.GetBytes ( tIndex.m_dRtChunkNames.Begin(), iLen*sizeof(int) );
}
// prior to v.12 use iDiskBase + iDiskChunks
// v.12 stores chunk list but wrong
if ( uVersion<13 )
{
tIndex.m_dRtChunkNames.Reset ( iDiskChunks );
ARRAY_FOREACH ( iChunk, tIndex.m_dRtChunkNames )
tIndex.m_dRtChunkNames[iChunk] = iChunk + iDiskBase;
}
// load ram
CSphString sRamName = GetIndexFileName ( tIndex.m_sPath, "ram" );
if ( sphIsReadable ( sRamName.cstr(), &sError ) )
{
CSphAutoreader rdChunk;
if ( !rdChunk.Open ( sRamName, sError ) )
return false;
if ( !rdChunk.GetDword () ) // !Id64
{
sError = "tables with 32-bit docids are no longer supported";
return false;
}
tIndex.m_iSegmentSeq = rdChunk.GetDword();
int iSegmentCount = rdChunk.GetDword();
// segments cannot be converted; the user must flush them to disk first
if ( iSegmentCount )
{
sError = "RT table could not be converted; run FLUSH RAMCHUNK <table_name> before conversion";
return false;
}
// field lengths
// NOTE(review): iFields comes from the file; assumes it fits
// tIndex.m_dFieldLens -- confirm the buffer is sized for the max field count
int iFields = rdChunk.GetDword();
for ( int i=0; i<iFields; i++ )
tIndex.m_dFieldLens[i] = rdChunk.GetOffset();
}
return true;
}
// Rotates RT index control files in place: current .meta/.ram/.kill are moved
// to ".old.*" names, then the freshly written ".new.*" files take their place.
static bool RenameRtIndex ( Index_t & tIndex, CSphString & sError )
{
CSphString sMetaNew;
sMetaNew.SetSprintf ( "%s.new.meta", tIndex.m_sPathOut.cstr() );
CSphString sChunkNew;
sChunkNew.SetSprintf ( "%s.new.ram", tIndex.m_sPathOut.cstr() );
CSphString sMetaTo, sChunkTo, sKillTo;
sMetaTo.SetSprintf ( "%s.meta", tIndex.m_sPathOut.cstr() );
sChunkTo.SetSprintf ( "%s.ram", tIndex.m_sPathOut.cstr() );
sKillTo.SetSprintf ( "%s.kill", tIndex.m_sPathOut.cstr() );
CSphString sMetaOld, sChunkOld, sKillOld;
sMetaOld.SetSprintf ( "%s.old.meta", tIndex.m_sPathOut.cstr() );
sChunkOld.SetSprintf ( "%s.old.ram", tIndex.m_sPathOut.cstr() );
sKillOld.SetSprintf ( "%s.old.kill", tIndex.m_sPathOut.cstr() );
// cur to old
// the ram chunk may legitimately be absent (flushed RT index)
bool bHasRamChunk = sphIsReadable ( sChunkTo.cstr() );
if ( bHasRamChunk && ::rename ( sChunkTo.cstr(), sChunkOld.cstr() ) )
{
sError.SetSprintf ( "failed to rename ram chunk (src=%s, dst=%s, errno=%d, error=%s)", sChunkTo.cstr(), sChunkOld.cstr(), errno, strerror(errno) );
return false;
}
if ( ::rename ( sMetaTo.cstr(), sMetaOld.cstr() ) )
{
sError.SetSprintf ( "failed to rename meta (src=%s, dst=%s, errno=%d, error=%s)", sMetaTo.cstr(), sMetaOld.cstr(), errno, strerror(errno) );
return false;
}
// a missing .kill file is only an error when a ram chunk was present
if ( ::rename ( sKillTo.cstr(), sKillOld.cstr() ) && bHasRamChunk )
{
sError.SetSprintf ( "failed to rename killlist (src=%s, dst=%s, errno=%d, error=%s)", sKillTo.cstr(), sKillOld.cstr(), errno, strerror(errno) );
return false;
}
// new to cur
if ( ::rename ( sChunkNew.cstr(), sChunkTo.cstr() ) )
{
sError.SetSprintf ( "failed to rename ram chunk (src=%s, dst=%s, errno=%d, error=%s)", sChunkNew.cstr(), sChunkTo.cstr(), errno, strerror(errno) );
return false;
}
if ( ::rename ( sMetaNew.cstr(), sMetaTo.cstr() ) )
{
sError.SetSprintf ( "failed to rename meta (src=%s, dst=%s, errno=%d, error=%s)", sMetaNew.cstr(), sMetaTo.cstr(), errno, strerror(errno) );
return false;
}
return true;
}
// Writes the new-format RT meta (.new.meta) and an empty RAM chunk
// (.new.ram), then rotates them into place when converting in-place.
// Field order in the meta must match the current-version meta reader.
static bool SaveRtIndex ( Index_t & tIndex, CSphString & sWarning, CSphString & sError )
{
// no disk chunks - need to copy old schema from meta and update it if necessary
if ( !tIndex.m_dRtChunkNames.GetLength() )
CopyAndUpdateSchema ( tIndex, tIndex.m_tSchema );
// merge index settings with new defaults
CSphConfigSection hIndex;
CSphIndexSettings tDefaultSettings;
if ( !tDefaultSettings.Setup ( hIndex, tIndex.m_sName.cstr(), sWarning, sError ) )
return false;
// write new meta
CSphString sMetaNew;
sMetaNew.SetSprintf ( "%s.new.meta", tIndex.m_sPathOut.cstr() );
CSphWriter wrMeta;
if ( !wrMeta.OpenFile ( sMetaNew, sError ) )
return false;
wrMeta.PutDword ( META_HEADER_MAGIC );
wrMeta.PutDword ( META_VERSION );
wrMeta.PutDword ( (DWORD)tIndex.m_iTotalDocuments ); // FIXME? we don't expect over 4G docs per just 1 local index
wrMeta.PutOffset ( tIndex.m_iTotalBytes ); // FIXME? need PutQword ideally
wrMeta.PutOffset ( tIndex.m_iTID );
// meta v.4, save disk index format and settings, too
wrMeta.PutDword ( INDEX_FORMAT_VERSION );
WriteSchema ( wrMeta, tIndex.m_tSchema );
// index settings
wrMeta.PutDword ( tIndex.m_tSettings.RawMinPrefixLen() );
wrMeta.PutDword ( tIndex.m_tSettings.m_iMinInfixLen );
wrMeta.PutDword ( tIndex.m_tSettings.m_iMaxSubstringLen );
wrMeta.PutByte ( tIndex.m_tSettings.m_bHtmlStrip ? 1 : 0 );
wrMeta.PutString ( tIndex.m_tSettings.m_sHtmlIndexAttrs.cstr () );
wrMeta.PutString ( tIndex.m_tSettings.m_sHtmlRemoveElements.cstr () );
wrMeta.PutByte ( tIndex.m_tSettings.m_bIndexExactWords ? 1 : 0 );
wrMeta.PutDword ( tIndex.m_tSettings.m_eHitless );
wrMeta.PutDword ( SPH_HIT_FORMAT_INLINE );
wrMeta.PutByte ( tIndex.m_tSettings.m_bIndexSP ? 1 : 0 );
wrMeta.PutString ( tIndex.m_tSettings.m_sZones );
wrMeta.PutDword ( tIndex.m_tSettings.m_iBoundaryStep );
wrMeta.PutDword ( tIndex.m_tSettings.m_iStopwordStep );
wrMeta.PutDword ( tIndex.m_tSettings.m_iOvershortStep );
wrMeta.PutDword ( tIndex.m_tSettings.m_iEmbeddedLimit );
wrMeta.PutByte ( tIndex.m_tSettings.m_eBigramIndex );
wrMeta.PutString ( tIndex.m_tSettings.m_sBigramWords );
wrMeta.PutByte ( tIndex.m_tSettings.m_bIndexFieldLens );
wrMeta.PutByte ( tIndex.m_tSettings.m_ePreprocessor==Preprocessor_e::ICU ? 1 : 0 );
wrMeta.PutString (""); // was: RLP context
wrMeta.PutString ( tIndex.m_tSettings.m_sIndexTokenFilter );
// these settings did not exist in the legacy format; take the defaults
wrMeta.PutOffset ( tDefaultSettings.m_tBlobUpdateSpace );
wrMeta.PutDword ( tDefaultSettings.m_iSkiplistBlockSize );
wrMeta.PutString ( "" ); // tSettings.m_sHitlessFiles
// tokenizer
SaveTokenizerSettings ( wrMeta, tIndex.m_pTokenizer, tIndex.m_tSettings.m_iEmbeddedLimit );
// dictionary
SaveDictionarySettings ( wrMeta, tIndex.m_pDict, tIndex.m_pDict->GetSettings().m_bWordDict, tIndex.m_tSettings.m_iEmbeddedLimit );
// meta v.5
wrMeta.PutDword ( tIndex.m_iWordsCheckpoint );
// meta v.7
wrMeta.PutDword ( tIndex.m_iMaxCodepointLength );
// should be rebuild on load
wrMeta.PutByte ( 0 ); // BLOOM_PER_ENTRY_VALS_COUNT
wrMeta.PutByte ( 0 ); // BLOOM_HASHES_COUNT
// meta v.11
tIndex.m_tFieldFilterSettings.Save(wrMeta);
// meta v.12
wrMeta.PutDword ( tIndex.m_dRtChunkNames.GetLength () );
wrMeta.PutBytes ( tIndex.m_dRtChunkNames.Begin(), tIndex.m_dRtChunkNames.GetLengthBytes64 () );
// meta v.17
wrMeta.PutOffset ( DEFAULT_RT_MEM_LIMIT );
wrMeta.CloseFile();
// ram chunk
// always written empty: segments were required to be flushed before conversion
CSphString sChunkNew;
sChunkNew.SetSprintf ( "%s.new.ram", tIndex.m_sPathOut.cstr() );
CSphWriter wrChunk;
if ( !wrChunk.OpenFile ( sChunkNew, sError ) )
return false;
wrChunk.PutDword ( tIndex.m_iSegmentSeq );
wrChunk.PutDword ( 0 ); // N of RAM segs
// field lengths
wrChunk.PutDword ( tIndex.m_dSchemaFields.GetLength() );
ARRAY_FOREACH ( i, tIndex.m_dSchemaFields )
wrChunk.PutOffset ( tIndex.m_dFieldLens[i] );
wrChunk.CloseFile();
// rotate files only for in-place conversion (same input and output path)
if ( tIndex.m_sPath==tIndex.m_sPathOut && !RenameRtIndex ( tIndex, sError ) )
return false;
return true;
}
/// Converts a single plain (disk) table from the legacy 2.x format to 3.0 format.
/// dKilled holds document ids to drop during conversion (skipped when bIgnoreKlist),
/// tKlistTargets are stored in the converted header, and pRtIndex, when non-null,
/// receives the converted schema (used for the last disk chunk of an RT table).
/// Returns false and fills sError on failure.
static bool ConvertPlain ( const CSphString & sName, const CSphString & sPath, bool bStripPath, CSphString & sError, const CSphVector<SphDocID_t> & dKilled, const CSphString & sPathOut, const KillListTargets_c & tKlistTargets, Index_t * pRtIndex, bool bIgnoreKlist=false )
{
	// need scope for destructor
	// (readers/handles held by tIndex must be closed before files are renamed or copied below)
	{
		Index_t tIndex;
		tIndex.m_sName = sName;
		tIndex.m_sPath = sPath;
		tIndex.m_sPathOut = sPathOut;
		tIndex.m_bStripPath = bStripPath;
		tIndex.m_tKlistTargets = tKlistTargets;

		bool bLoaded = LoadPlainIndexChunk ( tIndex, sError );
		if ( !bLoaded )
		{
			sError.SetSprintf ( "failed to load table '%s', error: %s", sName.cstr(), sError.cstr() );
			return false;
		}

		ConverterPlain_t tConverter;
		if ( !tConverter.Save ( dKilled, tIndex, bIgnoreKlist, sError ) )
		{
			sError.SetSprintf ( "failed to convert table '%s', error: %s", sName.cstr(), sError.cstr() );
			return false;
		}

		// hand the converted schema back to the caller (RT conversion needs it for its meta)
		if ( pRtIndex )
			pRtIndex->m_tSchema = tConverter.GetSchema();
	}

	// rename only in case output-dir set
	// NOTE(review): this comment looks inverted - the rename branch runs when the
	// paths are EQUAL, i.e. when NO separate output dir was given; confirm
	if ( sPath==sPathOut )
	{
		if ( !RotateIndexFiles ( sPath, sPathOut, sError ) )
		{
			sError.SetSprintf ( "failed to rename table '%s', error: %s", sName.cstr(), sError.cstr() );
			return false;
		}
	} else
	{
		// separate output dir: the hitlist is copied over explicitly
		if ( !CopyHitlist ( sPath, sPathOut, sError ) )
		{
			sError.SetSprintf ( "failed to copy hitlist table '%s', error: %s", sName.cstr(), sError.cstr() );
			return false;
		}
	}

	return true;
}
/// Collects ids of documents killed by disk chunks NEWER than iCurrentChunk.
/// Every later chunk's <path>.<name>.spk kill-list file is read (when present) and
/// its ids are merged into dKilled; the result is deduplicated via Uniq().
static void GetKilledDocs ( const CSphFixedVector<int> & dRtChunkNames, int iCurrentChunk, const CSphString & sPath, CSphVector<SphDocID_t> & dKilled )
{
	dKilled.Resize ( 0 );

	CSphString sError;
	CSphString sKillName;
	for ( int i=iCurrentChunk+1; i<dRtChunkNames.GetLength(); i++ )
	{
		sKillName.SetSprintf ( "%s.%d.spk", sPath.cstr(), dRtChunkNames[i] );

		// chunks without a readable kill-list file contribute nothing; previously an
		// unreadable file fell through to GetFilesize() on an unopened reader
		if ( !sphIsReadable ( sKillName.cstr() ) )
			continue;

		CSphAutoreader tKill;
		if ( !tKill.Open ( sKillName.cstr(), sError ) )
		{
			sphWarning ( "%s", sError.cstr() );
			continue;
		}

		auto iCount = (int) ( tKill.GetFilesize() / sizeof(SphDocID_t) );
		if ( !iCount )
			continue;

		int iOff = dKilled.GetLength();
		dKilled.Resize ( iOff + iCount );
		for ( int iElem=0; iElem<iCount; iElem++ )
			dKilled[iOff+iElem] = tKill.GetOffset();
	}

	dKilled.Uniq(); // get rid of duplicates
}
/// Loads a legacy (pre-3.0) percolate table: validates the meta header, then reads
/// schema, index/tokenizer/dictionary settings, optional field filter, the stored
/// queries and (v7+) the TID. Returns false and fills sError on any problem.
static bool LoadPqIndex ( Index_t & tIndex, CSphString & sError )
{
	// load meta
	CSphString sMetaName = GetIndexFileName ( tIndex.m_sPath, "meta" );

	CSphAutoreader rdMeta;
	if ( !rdMeta.Open ( sMetaName.cstr(), sError ) )
		return false;

	if ( rdMeta.GetDword()!=PQ_META_HEADER_MAGIC )
	{
		sError.SetSprintf ( "invalid meta file %s", sMetaName.cstr() );
		return false;
	}

	DWORD uVersion = rdMeta.GetDword();
	if ( uVersion>PQ_META_VERSION )
	{
		sError.SetSprintf ( "already a v3 table; nothing to do" );
		return false;
	}

	// was: ( uVersion==0 || uVersion>PQ_META_VERSION ); the second half is
	// unreachable after the check above, so only the zero check remains
	if ( uVersion==0 )
	{
		sError.SetSprintf ( "%s is v.%d, binary is v.%d", sMetaName.cstr(), uVersion, PQ_META_VERSION );
		return false;
	}

	DWORD uSettingsVer = rdMeta.GetDword();

	// load settings
	legacy::ReadSchema ( rdMeta, tIndex.m_dSchemaFields, tIndex.m_dSchemaAttrs );
	legacy::LoadIndexSettings ( tIndex.m_tSettings, rdMeta, uSettingsVer );
	if ( !legacy::LoadTokenizerSettings ( rdMeta, tIndex.m_tTokSettings, tIndex.m_tEmbeddedTok, uSettingsVer, sError ) )
		return false;

	// NOTE(review): unlike the tokenizer loader above, this result is not checked;
	// sError may be filled without failing the load - confirm that is intended
	legacy::LoadDictionarySettings ( rdMeta, tIndex.m_tDictSettings, tIndex.m_tEmbeddedDict, uSettingsVer, sError );

	if ( !SetupWordProcessors ( tIndex, sError ) )
		return false;

	if ( uVersion>=6 )
		legacy::LoadFieldFilterSettings ( rdMeta, tIndex.m_tFieldFilterSettings );

	// queries
	DWORD uQueries = rdMeta.GetDword();
	tIndex.m_dStored.Reset ( uQueries );
	ARRAY_FOREACH ( i, tIndex.m_dStored )
	{
		StoredQueryDesc_t & tQuery = tIndex.m_dStored[i];
		if ( uVersion<7 )
			LoadStoredQueryV6 ( uVersion, tQuery, rdMeta );
		else
			LoadStoredQuery ( uVersion, tQuery, rdMeta );
	}

	if ( uVersion>=7 )
		tIndex.m_iTID = rdMeta.GetOffset();

	if ( rdMeta.GetErrorFlag() )
	{
		sError = rdMeta.GetErrorMessage();
		return false;
	}

	return true;
}
/// Writes a converted percolate (PQ) table: converts the schema, merges index
/// settings with current defaults, serializes a v.(PQ_META_VERSION+1) meta file,
/// and (for in-place conversion) rotates <table>.meta to <table>.old.meta.
/// The field order below must mirror the daemon's meta reader - do not reorder.
static bool SavePqIndex ( Index_t & tIndex, CSphString & sWarning, CSphString & sError )
{
	ConverterPlain_t tConverter;
	if ( !tConverter.ConvertSchema ( tIndex, sError ) )
		return false;

	// merge index settings with new defaults
	CSphConfigSection hIndex;
	CSphIndexSettings tDefaultSettings;
	if ( !tDefaultSettings.Setup ( hIndex, tIndex.m_sName.cstr(), sWarning, sError ) )
		return false;

	// write new meta (to a ".new.meta" temp name; renamed into place at the end)
	CSphString sMetaNew;
	sMetaNew.SetSprintf ( "%s.new.meta", tIndex.m_sPathOut.cstr() );

	CSphWriter wrMeta;
	if ( !wrMeta.OpenFile ( sMetaNew, sError ) )
		return false;

	wrMeta.PutDword ( PQ_META_HEADER_MAGIC );
	wrMeta.PutDword ( PQ_META_VERSION+1 ); // bumped version marks the converted (3.0) format
	wrMeta.PutDword ( INDEX_FORMAT_VERSION );
	WriteSchema ( wrMeta, tIndex.m_tSchema );

	// index settings
	wrMeta.PutDword ( tIndex.m_tSettings.RawMinPrefixLen() );
	wrMeta.PutDword ( tIndex.m_tSettings.m_iMinInfixLen );
	wrMeta.PutDword ( tIndex.m_tSettings.m_iMaxSubstringLen );
	wrMeta.PutByte ( tIndex.m_tSettings.m_bHtmlStrip ? 1 : 0 );
	wrMeta.PutString ( tIndex.m_tSettings.m_sHtmlIndexAttrs.cstr () );
	wrMeta.PutString ( tIndex.m_tSettings.m_sHtmlRemoveElements.cstr () );
	wrMeta.PutByte ( tIndex.m_tSettings.m_bIndexExactWords ? 1 : 0 );
	wrMeta.PutDword ( tIndex.m_tSettings.m_eHitless );
	wrMeta.PutDword ( tIndex.m_tSettings.m_eHitFormat );
	wrMeta.PutByte ( tIndex.m_tSettings.m_bIndexSP );
	wrMeta.PutString ( tIndex.m_tSettings.m_sZones );
	wrMeta.PutDword ( tIndex.m_tSettings.m_iBoundaryStep );
	wrMeta.PutDword ( tIndex.m_tSettings.m_iStopwordStep );
	wrMeta.PutDword ( tIndex.m_tSettings.m_iOvershortStep );
	wrMeta.PutDword ( tIndex.m_tSettings.m_iEmbeddedLimit );
	wrMeta.PutByte ( tIndex.m_tSettings.m_eBigramIndex );
	wrMeta.PutString ( tIndex.m_tSettings.m_sBigramWords );
	wrMeta.PutByte ( tIndex.m_tSettings.m_bIndexFieldLens );
	wrMeta.PutByte ( tIndex.m_tSettings.m_ePreprocessor==Preprocessor_e::ICU ? 1 : 0 );
	wrMeta.PutString(""); // was: RLP context
	wrMeta.PutString ( tIndex.m_tSettings.m_sIndexTokenFilter );
	wrMeta.PutOffset ( tIndex.m_tSettings.m_tBlobUpdateSpace );
	wrMeta.PutDword ( tIndex.m_tSettings.m_iSkiplistBlockSize );

	SaveTokenizerSettings ( wrMeta, tIndex.m_pTokenizer, tIndex.m_tSettings.m_iEmbeddedLimit );
	SaveDictionarySettings ( wrMeta, tIndex.m_pDict, false, tIndex.m_tSettings.m_iEmbeddedLimit );

	tIndex.m_tFieldFilterSettings.Save(wrMeta);

	// stored percolate queries
	wrMeta.PutDword ( tIndex.m_dStored.GetLength() );
	ARRAY_FOREACH ( i, tIndex.m_dStored )
		SaveStoredQuery ( tIndex.m_dStored[i], wrMeta );

	wrMeta.PutOffset ( tIndex.m_iTID );

	wrMeta.CloseFile();

	// in-place conversion: keep the previous meta as ".old.meta", move the new one in
	if ( tIndex.m_sPath==tIndex.m_sPathOut )
	{
		CSphString sMetaTo;
		sMetaTo.SetSprintf ( "%s.meta", tIndex.m_sPathOut.cstr() );
		CSphString sMetaOld;
		sMetaOld.SetSprintf ( "%s.old.meta", tIndex.m_sPathOut.cstr() );
		if ( ::rename ( sMetaTo.cstr(), sMetaOld.cstr() ) )
		{
			sError.SetSprintf ( "failed to rename meta (src=%s, dst=%s, errno=%d, error=%s)", sMetaTo.cstr(), sMetaOld.cstr(), errno, strerror(errno) );
			return false;
		}
		if ( ::rename ( sMetaNew.cstr(), sMetaTo.cstr() ) )
		{
			sError.SetSprintf ( "failed to rename meta (src=%s, dst=%s, errno=%d, error=%s)", sMetaNew.cstr(), sMetaTo.cstr(), errno, strerror(errno) );
			return false;
		}
	}

	return true;
}
/// Top-level conversion dispatcher for one table. When eType is unknown it is
/// sniffed from the files on disk (.spa => plain, .meta+.ram => RT, .meta => PQ),
/// then the table is converted: RT chunk-by-chunk plus meta, PQ meta-only,
/// plain as a single chunk. Returns false and fills sError on failure.
static bool Convert ( const CSphString & sName, const CSphString & sPath, IndexType_e eType, bool bStripPath, const CSphString & sPathOut, const KillListTargets_c & dKlistTargets, CSphString & sError )
{
	// sniff the table type from which files exist on disk
	if ( eType==INDEX_UNKNOWN )
	{
		if ( sphIsReadable ( GetIndexFileName( sPath, "spa" ).cstr() ) )
			eType = INDEX_PLAIN;
		else if ( sphIsReadable ( GetIndexFileName( sPath, "meta" ).cstr() ) && sphIsReadable ( GetIndexFileName( sPath, "ram" ).cstr() ) )
			eType = INDEX_RT;
		else if ( sphIsReadable ( GetIndexFileName( sPath, "meta" ).cstr() ) )
			eType = INDEX_PQ;
	}
	if ( eType==INDEX_UNKNOWN )
	{
		sError.SetSprintf ( "unknown table type '%s'", sName.cstr() );
		return false;
	}

	printf ( "converting %s table '%s'\n", g_sIndexType[eType], sName.cstr() );
	if ( eType==INDEX_RT )
	{
		Index_t tIndex;
		tIndex.m_sName = sName;
		tIndex.m_sPath = sPath;
		tIndex.m_sPathOut = sPathOut;
		tIndex.m_bStripPath = bStripPath;

		if ( !LoadRtIndex ( tIndex, sError ) )
		{
			sError.SetSprintf ( "failed to load table '%s', error: %s", sName.cstr(), sError.cstr() );
			return false;
		}

		// convert every disk chunk; each chunk ignores the kill-list of OLDER chunks
		// but applies the kill-lists of all NEWER chunks (GetKilledDocs)
		CSphVector<SphDocID_t> dKilled;
		int iChunk = 0;
		CSphString sChunkInPath, sChunkOutPath;
		for ( ; iChunk<tIndex.m_dRtChunkNames.GetLength(); iChunk++ )
		{
			GetKilledDocs ( tIndex.m_dRtChunkNames, iChunk, sPath, dKilled );
			sChunkInPath.SetSprintf ( "%s.%d", sPath.cstr(), tIndex.m_dRtChunkNames[iChunk] );
			sChunkOutPath.SetSprintf ( "%s.%d", sPathOut.cstr(), tIndex.m_dRtChunkNames[iChunk] );
			// only the last chunk exports its schema into tIndex (used by SaveRtIndex)
			Index_t * pSchema = ( iChunk==tIndex.m_dRtChunkNames.GetLength() - 1 ? &tIndex : nullptr );
			if ( !ConvertPlain ( sName, sChunkInPath, bStripPath, sError, dKilled, sChunkOutPath, KillListTargets_c(), pSchema, true ) )
			{
				sphWarning ( "failed to convert %d disk chunk, error: %s, renaming original disk chunks back ...", iChunk, sError.cstr() );
				break;
			}
		}
		if ( iChunk!=tIndex.m_dRtChunkNames.GetLength() )
		{
			// rename back converted files from old to current up to failed iChunk but not iChunk itself
			// NOTE(review): sChunkInPath is recomputed per chunk here but RenameIndex
			// is invoked with the unchanged sPath every iteration - looks like the
			// per-chunk path was meant to be passed; confirm
			for ( int iRoll=0; iRoll<iChunk && tIndex.IsSeparateOutDir(); iRoll++ )
			{
				sChunkInPath.SetSprintf ( "%s.%d", sPath.cstr(), tIndex.m_dRtChunkNames[iRoll] );
				// rename current to old
				if ( !RenameIndex ( sPath, "old2cur", true, ".old", "", sError ) )
				{
					sphWarning ( "failed to rename old back to current at %d disk chunk, fix it manually, error: %s", iRoll, sError.cstr() );
					break;
				}
			}

			sError.SetSprintf ( "conversion failed for table '%s', error: %s", sName.cstr(), sError.cstr() );
			return false;
		}

		CSphString sWarning;
		if ( !SaveRtIndex ( tIndex, sWarning, sError ) )
		{
			sError.SetSprintf ( "conversion failed for table '%s', error: %s", sName.cstr(), sError.cstr() );
			return false;
		}
		if ( !sWarning.IsEmpty() )
			sphWarning ( "%s", sWarning.cstr() );
	} else if ( eType==INDEX_PQ )
	{
		Index_t tIndex;
		tIndex.m_sName = sName;
		tIndex.m_sPath = sPath;
		tIndex.m_sPathOut = sPathOut;

		if ( !LoadPqIndex ( tIndex, sError ) )
		{
			sError.SetSprintf ( "failed to load table '%s', error: %s", sName.cstr(), sError.cstr() );
			return false;
		}

		CSphString sWarning;
		if ( !SavePqIndex ( tIndex, sWarning, sError ) )
		{
			sError.SetSprintf ( "conversion failed for table '%s', error: %s", sName.cstr(), sError.cstr() );
			return false;
		}
		if ( !sWarning.IsEmpty() )
			sphWarning ( "%s", sWarning.cstr() );
	} else
	{
		// plain table: single chunk, kill-list applied from config/CLI targets
		CSphVector<SphDocID_t> dKilled;
		if ( !ConvertPlain ( sName, sPath, bStripPath, sError, dKilled, sPathOut, dKlistTargets, nullptr ) )
			return false;
	}

	return true;
}
}
/// print the product banner to stdout
static void ShowVersion ()
{
	fputs ( szMANTICORE_BANNER, stdout );
}
/// print CLI usage to stdout
static void ShowHelp ()
{
	fputs (
		"index_converter, a tool to convert table files from 2.X to 3.0 format\n"
		"\n"
		"Usage:\n"
		"index_converter --config manticore.conf --table test\n"
		"index_converter --path path_to_table_files --killlist-target main_tbl:id\n"
		"index_converter --config manticore.conf --all --output-dir converted\n"
		"\n"
		"Options are:\n"
		"-c, --config <file>\t\tread configuration from specified file\n"
		"--table <name>\t\t\tconvert table defined in config file\n"
		"--path <path_to_table_files>\tconvert table from path provided\n"
		"--strip-path\t\t\tstrip path from filenames referenced by table: stopwords, exceptions and wordforms\n"
		"--large-docid\t\t\tallows to convert documents with ids larger than 2^63 and display a warning, otherwise it will just exit on the large id with an error\n"
		"--output-dir <dir>\t\toutput directory for converted files\n"
		"--all\t\t\t\tconvert all tables in config file\n"
		"--killlist-target <targets>\tsets the tables that the kill-list will be applied to\n"
		, stdout );
}
/// index_converter entry point: parses CLI switches, resolves the set of tables to
/// convert (a single one by --table/--path, or every config table with --all),
/// then runs legacy::Convert() per table and prints a summary.
int main ( int argc, char ** argv )
{
	// no arguments: show banner + usage and exit
	if ( argc<2 )
	{
		ShowVersion();
		ShowHelp();
		exit ( 0 );
	}

	CSphString sError;
	CSphString sConfig;			// -c/--config
	CSphString sIndexName;		// --table (or derived from --path)
	CSphString sIndexIn;		// source path (from --path or the config's "path")
	CSphString sIndexOut;		// destination path (source path, or under --output-dir)
	CSphString sIndexFile;		// bare file-name part of the path
	CSphString sKlistTarget;
	bool bKlistTargetCLI = false;	// --killlist-target given on the command line
	bool bStripPath = false;
	bool bAll = false;

	// command-line parsing
	for ( int i=1; i<argc; i++ )
	{
		if ( strcmp ( argv[i], "-c" )==0 || strcmp ( argv[i], "--config" )==0 )
		{
			if ( ++i>=argc )
				sphDie ( "config requires an argument" );
			sConfig = argv[i];
		} else if ( strcmp ( argv[i], "-i" )==0 || strcmp ( argv[i], "--index" )==0 || strcmp ( argv[i], "-t" )==0 || strcmp ( argv[i], "--table" )==0 )
		{
			if ( ++i>=argc )
				sphDie ( "table name requires an argument" );
			sIndexName = argv[i];
		} else if ( strcmp ( argv[i], "--path")==0 )
		{
			if ( ++i>=argc )
				sphDie ( "path to table requires an argument" );
			sIndexIn = argv[i];
		} else if ( strcmp ( argv[i], "--strip-path")==0 )
		{
			bStripPath = true;
		} else if ( strcmp ( argv[i], "--large-docid")==0 )
		{
			legacy::g_bLargeDocid = true;
		} else if ( strcmp ( argv[i], "-v" )==0 )
		{
			ShowVersion();
			exit(0);
		} else if ( strcmp ( argv[i], "-h" )==0 || strcmp ( argv[i], "--help" )==0 )
		{
			ShowVersion();
			ShowHelp();
			exit(0);
		} else if ( strcmp ( argv[i], "--output-dir")==0 )
		{
			if ( ++i>=argc )
				sphDie ( "output directory requires an argument" );
			legacy::g_sOutDir = argv[i];
		} else if ( strcmp ( argv[i], "--all")==0 )
		{
			bAll = true;
		} else if ( strcmp ( argv[i], "--killlist-target" )==0 )
		{
			if ( ++i>=argc )
				sphDie ( "killlist target requires an argument" );
			bKlistTargetCLI = true;
			sKlistTarget = argv[i];
		} else
		{
			sphDie ( "unknown switch: %s", argv[i] );
		}
	}

	// option sanity checks
	if ( sIndexIn.IsEmpty() && ( sConfig.IsEmpty() || sIndexName.IsEmpty() ) && !bAll )
		sphDie ( "nothing to do" );
	if ( bAll && !sKlistTarget.IsEmpty() )
		sphDie ( "killlist-target not compatible with --all option" );

	// NOTE(review): sIndexFile is still empty here; it is only filled later from the
	// table path - confirm passing it to Parse() at this point is intended
	KillListTargets_c tKlistTargets;
	if ( !sKlistTarget.IsEmpty() && !tKlistTargets.Parse ( sKlistTarget, sIndexFile.cstr(), sError ) )
		sphDie ( "failed to parse killlist-target, '%s'", sError.cstr() );

	if ( !sphInitCharsetAliasTable ( sError ) )
		sphDie ( "failed to init charset alias table: %s", sError.cstr() );

	const CSphConfigType * pIndexes = nullptr;
	if ( !sConfig.IsEmpty() )
	{
		CSphConfig hConfig = sphLoadConfig ( sConfig, true );
		pIndexes = hConfig ( "index" );
		if ( ( bAll || !sIndexName.IsEmpty() ) && !pIndexes )
			sphDie ( "no tables found in config" );
		sphConfigureCommon ( hConfig );
	}

	int iConvertedCount = 0;
	int iIndexTotal = 0;
	StrVec_t dNameParts;
	// NOTE(review): pIndexes stays null when no config was given (--path mode) or
	// with --all and a config without an "index" section outside the check above;
	// calling begin() on it then looks like a null dereference - confirm against
	// CSphConfigType iterator semantics
	auto pItt = pIndexes->begin();
	while ( true )
	{
		// single-table mode: exactly one pass through the loop
		if ( !bAll && iIndexTotal )
			break;
		if ( bAll )
		{
			// NOTE(review): the iterator is advanced before its first use, so the
			// element at begin() appears to be skipped unless begin() yields a
			// before-first position - confirm
			++pItt;
			if ( pItt==pIndexes->end() )
				break;
			sIndexName = pItt->first;
			tKlistTargets.m_dTargets.Resize(0);
			sKlistTarget = "";
		}
		sError = "";
		dNameParts.Resize ( 0 );
		iIndexTotal++;
		legacy::IndexType_e eIndex = legacy::INDEX_UNKNOWN;

		if ( bAll || !sIndexName.IsEmpty() )
		{
			// config-driven mode: pull type, path and killlist_target from the section
			if ( !bAll && !pIndexes->Exists ( sIndexName ) )
			{
				sphWarning ( "no such table '%s', skipped", sIndexName.cstr() );
				continue;
			}
			const CSphConfigSection& tIndex = ( bAll ? pItt->second : ( *pIndexes )[sIndexName] );

			if ( tIndex.Exists ( "type" ) )
			{
				const CSphString sType = tIndex.GetStr ( "type", NULL );
				if ( sType.IsEmpty() )
				{
					sphWarning ( "unknown table '%s' type '%s', skipped", sIndexName.cstr(), sType.cstr() );
					continue;
				}
				if ( sType!="rt" && sType!="plain" && sType!="percolate" )
				{
					sphWarning ( "table '%s' type '%s', only 'plain' or 'rt' or 'percolate' types supported, skipped", sIndexName.cstr(), sType.cstr() );
					continue;
				}

				eIndex = ( sType=="rt" ? legacy::INDEX_RT : ( sType=="percolate" ? legacy::INDEX_PQ : legacy::INDEX_PLAIN ) );
			}

			if ( !tIndex.Exists ( "path" ) )
			{
				sphWarning ( "no table path '%s', skipped", sIndexName.cstr() );
				continue;
			}
			sIndexIn = tIndex["path"].cstr();
			sphSplit ( dNameParts, sIndexIn.cstr(), "/\\" );
			sIndexFile = dNameParts.Last();

			if ( tIndex.Exists( "killlist_target" ) )
			{
				// CLI switch wins over the per-table config value
				if ( bKlistTargetCLI )
					sphWarning ( "--killlist-target specified in command line overrides killlist_target from config '%s'", sConfig.cstr() );
				else
				{
					sKlistTarget = tIndex["killlist_target"].cstr();
					if ( !tKlistTargets.Parse ( sKlistTarget, sIndexName.cstr(), sError ) )
					{
						sphWarning ( "failed to parse killlist_target, '%s'", sError.cstr() );
						tKlistTargets.m_dTargets.Resize(0);
						sKlistTarget = "";
						sError = "";
					}
				}
			}

		} else
		{
			// --path mode: table name and file name are both the last path component
			sphSplit ( dNameParts, sIndexIn.cstr(), "/\\" );
			sIndexName = dNameParts.Last();
			sIndexFile = dNameParts.Last();
		}

		if ( !legacy::g_sOutDir.IsEmpty() )
		{
			// probe that the output dir is writable by creating a temp file
			CSphWriter tDir;
			CSphString sTmpName;
			sTmpName.SetSprintf ( "%s/%s.tmp", legacy::g_sOutDir.cstr(), sIndexFile.cstr() );
			if ( !tDir.OpenFile ( sTmpName, sError ) )
			{
				sphWarning ( "output-dir failed '%s', skipped", sError.cstr() );
				continue;
			}
			// tDir will be unlinked as non-closed

			sIndexOut.SetSprintf ( "%s/%s", legacy::g_sOutDir.cstr(), sIndexFile.cstr() );
		} else
		{
			sIndexOut = sIndexIn;
		}

		bool bOk = legacy::Convert ( sIndexName, sIndexIn, eIndex, bStripPath, sIndexOut, tKlistTargets, sError );
		if ( !bOk )
		{
			sphWarning ( "%s", sError.cstr() );
		} else
		{
			printf ( "converted table '%s'%s%s\n", sIndexName.cstr(),
				( sKlistTarget.IsEmpty() ? "" : " with killlist_target=" ), ( sKlistTarget.IsEmpty() ? "" : sKlistTarget.cstr() ) );
			iConvertedCount++;
		}
	}

	printf ( "converted tables %d(%d)\n", iConvertedCount, iIndexTotal );

	return 0;
}
| 81,276
|
C++
|
.cpp
| 2,204
| 34.060345
| 272
| 0.709413
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
16,940
|
sphinxstemcz.cpp
|
manticoresoftware_manticoresearch/src/sphinxstemcz.cpp
|
//
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "sphinxstd.h"
#include <string.h>
/// One suffix-clamping rule of the Czech stemmer.
struct ClampRule_t
{
	int		m_iMinLength;		// rule fires only when strlen(word) > this
	BYTE	m_szSuffix[10];		// suffix to match ('!' bytes are stripped at init time)
	int		m_iCheckLength;		// number of suffix bytes compared
	int		m_nRemove;			// number of trailing bytes removed on match
	bool	m_bPalatalize;		// NOTE(review): not consulted by ApplyRules(), which
								// always palatalizes on a match - confirm intended
};

// case-ending removal rules, ordered longest-suffix first
// (hex escapes encode windows-1250 Czech letters; see per-line \uXXXX notes)
static ClampRule_t g_dCaseRules [] =
{
	{ 7, "atech", 5, 5, false },
	{ 6, "\xECtem", 4, 3, true }, // \u011b
	{ 6, "at\xF9m", 4, 4, false }, // \u016f
	{ 5, "ech", 3, 2, true },
	{ 5, "ich", 3, 2, true },
	{ 5, "\xED!ch", 3, 2, true }, // \u00ed
	{ 5, "\xE9ho", 3, 2, true }, // \u00e9
	{ 5, "\xECmi", 3, 2, true }, // \u011b
	{ 5, "emi", 3, 2, true },
	{ 5, "\xE9mu", 3, 2, true }, // \u00e9
	{ 5, "\xECte", 3, 2, true }, // \u011b
	{ 5, "\xECti", 3, 2, true }, // \u011b
	{ 5, "iho", 3, 2, true },
	{ 5, "\xEDho", 3, 2, true }, // \u00ed
	{ 5, "\xEDmi", 3, 2, true }, // \u00ed
	{ 5, "imu", 3, 2, true },
	{ 5, "\xE1!ch", 3, 3, false }, // \u00e1
	{ 5, "ata", 3, 3, false },
	{ 5, "aty", 3, 3, false },
	{ 5, "\xFD!ch", 3, 3, false }, // \u00fd
	{ 5, "ama", 3, 3, false },
	{ 5, "ami", 3, 3, false },
	{ 5, "ov\xE9", 3, 3, false }, // \u00e9
	{ 5, "ovi", 3, 3, false },
	{ 5, "\xFDmi", 3, 3, false }, // \u00fd
	{ 4, "em", 2, 1, true },
	{ 4, "es", 2, 2, true },
	{ 4, "\xE9m", 2, 2, true }, // \u00e9
	{ 4, "\xEDm", 2, 2, true }, // \u00ed
	{ 4, "\xF9!fm", 2, 2, false }, // \u016f
	{ 4, "at", 2, 2, false },
	{ 4, "\xE1m", 2, 2, false }, // \u00e1
	{ 4, "os", 2, 2, false },
	{ 4, "us", 2, 2, false },
	{ 4, "\xFDm", 2, 2, false }, // \u00fd
	{ 4, "mi", 2, 2, false },
	{ 4, "ou", 2, 2, false },
	{ 3, "e", 1, 0, true },
	{ 3, "i", 1, 0, true },
	{ 3, "\xED", 1, 0, true }, // \u00ed
	{ 3, "\xEC", 1, 0, true }, // \u011b
	{ 3, "u", 1, 1, false },
	{ 3, "y", 1, 1, false },
	{ 3, "\xF9", 1, 1, false }, // \u016f
	{ 3, "a", 1, 1, false },
	{ 3, "o", 1, 1, false },
	{ 3, "\xE1", 1, 1, false }, // \u00e1
	{ 3, "\xE9", 1, 1, false }, // \u00e9
	{ 3, "\xFD", 1, 1, false } // \u00fd
};

// possessive-ending removal rules, applied after the case rules
static ClampRule_t g_dPosessiveRules [] =
{
	{ 5, "ov", 2, 2, false },
	{ 5, "\xF9v", 2, 2, false },
	{ 5, "in", 2, 1, true },
};

/// One suffix-replacement (palatalization) rule.
struct ReplaceRule_t
{
	BYTE	m_szSuffix[4];		// suffix to match (also the removal length)
	int		m_iRemoveLength;	// bytes compared and removed
	BYTE	m_szAppend[4];		// replacement appended after removal
};

// palatalization rules: replace a softened stem ending with its hard base form
static ReplaceRule_t g_dPalatalizeRules [] =
{
	{ "ci", 2, "k" },
	{ "ce", 2, "k" },
	{ "\xE8i", 2, "k" }, // \u010d
	{ "\xE8!e", 2, "k" }, // \u010d
	{ "zi", 2, "h" },
	{ "ze", 2, "h" },
	{ "\x9Ei", 2, "h" }, // \u017e
	{ "\x9E!e", 2, "h" }, // \u017e
	{ "\xE8t\xEC", 3, "ck" }, // \u010d \u011b
	{ "\xE8ti", 3, "ck" },
	{ "\xE8t\xED", 3, "ck" }, // \u010d \u00ed
	{ "\x9At\xEC", 3, "sk" }, // \u0161 \u011b // was: check 2, remove 2
	{ "\x9Ati", 3, "sk" }, // \u0161 // was: check 2, remove 2
	{ "\x9At\xED", 3, "sk" }, // \u0161 \u00ed // was: check 2, remove 2
};
/// Apply the first matching palatalization rule to the tail of the word, in place;
/// when no rule matches, drop the last byte instead. No-op on a null pointer.
static void Palatalize ( BYTE * word )
{
	if ( !word )
		return;

	auto iLen = (int) strlen ( (char*)word );
	for ( const ReplaceRule_t & tRule : g_dPalatalizeRules )
	{
		if ( iLen<tRule.m_iRemoveLength )
			continue;
		if ( strncmp ( (char*)word + iLen - tRule.m_iRemoveLength, (char*)tRule.m_szSuffix, tRule.m_iRemoveLength ) )
			continue;

		// matched: chop the suffix and append the replacement
		word[iLen - tRule.m_iRemoveLength] = '\0';
		strcat ( (char*)word, (char*)tRule.m_szAppend ); // NOLINT strcat
		return;
	}

	// fallback: trim one trailing byte
	if ( iLen>0 )
		word[iLen-1] = '\0';
}
/// Apply the first matching clamp rule from pRules to the word, in place:
/// cut m_nRemove trailing bytes and palatalize the remainder. No-op on nulls.
/// NOTE(review): m_bPalatalize is never consulted - every match palatalizes;
/// confirm this is the intended behavior.
static void ApplyRules ( BYTE * word, const ClampRule_t * pRules, int nRules )
{
	if ( !word || !pRules )
		return;

	auto iLen = (int) strlen ( (char *)word );
	for ( int iRule = 0; iRule < nRules; ++iRule )
	{
		const ClampRule_t & tRule = pRules[iRule];
		if ( iLen<=tRule.m_iMinLength )
			continue;
		if ( strncmp ( (char*)word + iLen - tRule.m_iCheckLength, (char*)tRule.m_szSuffix, tRule.m_iCheckLength ) )
			continue;

		word[iLen - tRule.m_nRemove] = '\0';
		Palatalize ( word );
		return;
	}
}
/// Removes all occurrences of cChar from the NUL-terminated szString, in place.
/// Rewritten as a single-pass two-pointer compaction: the original strchr()+memmove()
/// loop was O(n^2) and used a length captured once up front, so each memmove kept
/// copying bytes past the shrinking logical end of the string.
static void RemoveChars ( char * szString, char cChar )
{
	char * szDst = szString;
	for ( const char * szSrc = szString; *szSrc; ++szSrc )
		if ( *szSrc!=cChar )
			*szDst++ = *szSrc;
	*szDst = '\0';
}
/// Strip the '!' placeholder bytes from every suffix in a clamp-rule table.
/// Called once at init; no-op on a null table.
static void PreprocessRules ( ClampRule_t * pRules, int nRules )
{
	if ( !pRules )
		return;

	for ( ClampRule_t * pRule = pRules; pRule < pRules + nRules; ++pRule )
		RemoveChars ( (char *) pRule->m_szSuffix, '!' );
}
static void PreprocessReplace ()
{
int nRules = sizeof ( g_dPalatalizeRules ) / sizeof ( g_dPalatalizeRules[0] );
for ( int i = 0; i < nRules; ++i )
{
RemoveChars ( (char *) g_dPalatalizeRules[i].m_szSuffix, '!' );
RemoveChars ( (char *) g_dPalatalizeRules[i].m_szAppend, '!' );
}
}
void stem_cz_init ()
{
PreprocessRules ( g_dCaseRules, sizeof ( g_dCaseRules ) / sizeof ( g_dCaseRules[0] ) );
PreprocessRules ( g_dPosessiveRules, sizeof ( g_dPosessiveRules ) / sizeof ( g_dPosessiveRules[0] ) );
PreprocessReplace ();
}
/// Stem a Czech word in place: strip case endings first, then possessive endings.
void stem_cz ( BYTE * word )
{
	const int nCaseRules = int ( sizeof ( g_dCaseRules ) / sizeof ( g_dCaseRules[0] ) );
	const int nPosessiveRules = int ( sizeof ( g_dPosessiveRules ) / sizeof ( g_dPosessiveRules[0] ) );
	ApplyRules ( word, g_dCaseRules, nCaseRules );
	ApplyRules ( word, g_dPosessiveRules, nPosessiveRules );
}
| 5,475
|
C++
|
.cpp
| 172
| 29.755814
| 110
| 0.557579
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| true
| false
|
16,941
|
searchdhttp.cpp
|
manticoresoftware_manticoresearch/src/searchdhttp.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "searchdhttp.h"
#include "jsonqueryfilter.h"
#include "attribute.h"
#include "sphinxpq.h"
#include "http/http_parser.h"
#include "searchdaemon.h"
#include "searchdha.h"
#include "searchdreplication.h"
#include "accumulator.h"
#include "networking_daemon.h"
#include "client_session.h"
#include "tracer.h"
#include "searchdbuddy.h"
#include "aggrexpr.h"
#include "compressed_http.h"
// env-driven HTTP debug knobs, read once at startup
static bool g_bLogBadHttpReq = val_from_env ( "MANTICORE_LOG_HTTP_BAD_REQ", false ); // log content of bad http requests, ruled by this env variable
static int g_iLogHttpData = val_from_env ( "MANTICORE_LOG_HTTP_DATA", 0 ); // verbose logging of http data, ruled by this env variable
static bool LOG_LEVEL_HTTP = val_from_env ( "MANTICORE_LOG_HTTP", false ); // verbose logging http processing events, ruled by this env variable

#define LOG_COMPONENT_HTTP ""
#define HTTPINFO LOGMSG ( VERBOSE_DEBUG, HTTP, HTTP )

extern CSphString g_sStatusVersion;

// placeholder emitted instead of payload bytes when data logging is disabled
static const Str_t g_sDataDisabled = FROMS("-");

// Clamp a message to at most g_iLogHttpData bytes for logging; when data logging
// is off (g_iLogHttpData==0) both overloads return the "-" placeholder instead.
Str_t Data2Log ( Str_t tMsg ) { return ( g_iLogHttpData ? Str_t ( tMsg.first, Min ( tMsg.second, g_iLogHttpData ) ) : g_sDataDisabled ); }
Str_t Data2Log ( ByteBlob_t tMsg ) { return ( g_iLogHttpData ? Str_t ( (const char *)tMsg.first, Min ( tMsg.second, g_iLogHttpData ) ) : g_sDataDisabled ); }
/// Map an internal EHTTP_STATUS enum value to its numeric HTTP status code;
/// unknown values fall back to 503.
int HttpGetStatusCodes ( EHTTP_STATUS eStatus ) noexcept
{
	switch ( eStatus )
	{
	case EHTTP_STATUS::_100: return 100;
	case EHTTP_STATUS::_200: return 200;
	case EHTTP_STATUS::_206: return 206;
	case EHTTP_STATUS::_400: return 400;
	case EHTTP_STATUS::_403: return 403;
	case EHTTP_STATUS::_404: return 404;
	case EHTTP_STATUS::_405: return 405;
	case EHTTP_STATUS::_409: return 409;
	case EHTTP_STATUS::_413: return 413;
	case EHTTP_STATUS::_415: return 415;
	case EHTTP_STATUS::_500: return 500;
	case EHTTP_STATUS::_501: return 501;
	case EHTTP_STATUS::_503: return 503;
	case EHTTP_STATUS::_526: return 526;
	default: return 503;
	};
};
/// Inverse mapping: numeric HTTP status code to EHTTP_STATUS;
/// unknown codes fall back to 503.
EHTTP_STATUS HttpGetStatusCodes ( int iStatus ) noexcept
{
	switch ( iStatus )
	{
	case 100: return EHTTP_STATUS::_100;
	case 200: return EHTTP_STATUS::_200;
	case 206: return EHTTP_STATUS::_206;
	case 400: return EHTTP_STATUS::_400;
	case 403: return EHTTP_STATUS::_403;
	case 404: return EHTTP_STATUS::_404;
	case 405: return EHTTP_STATUS::_405;
	case 409: return EHTTP_STATUS::_409;
	case 413: return EHTTP_STATUS::_413;
	case 415: return EHTTP_STATUS::_415;
	case 500: return EHTTP_STATUS::_500;
	case 501: return EHTTP_STATUS::_501;
	case 503: return EHTTP_STATUS::_503;
	case 526: return EHTTP_STATUS::_526;
	default: return EHTTP_STATUS::_503;
	};
}
/// Map EHTTP_STATUS to the full status-line text ("<code> <reason>") used when
/// building the HTTP/1.1 response header; unknown values fall back to 503.
inline constexpr const char* HttpGetStatusName ( EHTTP_STATUS eStatus ) noexcept
{
	switch ( eStatus )
	{
	case EHTTP_STATUS::_100: return "100 Continue";
	case EHTTP_STATUS::_200: return "200 OK";
	case EHTTP_STATUS::_206: return "206 Partial Content";
	case EHTTP_STATUS::_400: return "400 Bad Request";
	case EHTTP_STATUS::_403: return "403 Forbidden";
	case EHTTP_STATUS::_404: return "404 Not Found";
	case EHTTP_STATUS::_405: return "405 Method Not Allowed";
	case EHTTP_STATUS::_409: return "409 Conflict";
	case EHTTP_STATUS::_413: return "413 Request Entity Too Large";
	case EHTTP_STATUS::_415: return "415 Unsupported Media Type";
	case EHTTP_STATUS::_500: return "500 Internal Server Error";
	case EHTTP_STATUS::_501: return "501 Not Implemented";
	case EHTTP_STATUS::_503: return "503 Service Unavailable";
	case EHTTP_STATUS::_526: return "526 Invalid SSL Certificate";
	default: return "503 Service Unavailable";
	};
}
extern CSphString g_sMySQLVersion;
/// Assemble a full HTTP/1.1 response (status line, headers, body) into dData.
/// bHtml picks the content type; bHeadReply emits headers only (HEAD request),
/// though Content-Length still advertises the body size.
static void HttpBuildReply ( CSphVector<BYTE> & dData, EHTTP_STATUS eCode, const char * sBody, int iBodyLen, bool bHtml, bool bHeadReply )
{
	assert ( sBody && iBodyLen );

	const char * sContentType = bHtml ? "text/html" : "application/json";
	CSphString sHeader;
	sHeader.SetSprintf ( "HTTP/1.1 %s\r\nServer: %s\r\nContent-Type: %s; charset=UTF-8\r\nContent-Length:%d\r\n\r\n", HttpGetStatusName(eCode), g_sMySQLVersion.cstr(), sContentType, iBodyLen );

	const int iHeadLen = sHeader.Length();
	dData.Resize ( bHeadReply ? iHeadLen : ( iHeadLen + iBodyLen ) );

	memcpy ( dData.Begin(), sHeader.cstr(), iHeadLen );
	if ( !bHeadReply )
		memcpy ( dData.Begin() + iHeadLen, sBody, iBodyLen );
}
/// Public wrapper: build a full (non-HEAD) HTTP reply into dData.
void HttpBuildReply ( CSphVector<BYTE> & dData, EHTTP_STATUS eCode, const char * sBody, int iBodyLen, bool bHtml )
{
	HttpBuildReply ( dData, eCode, sBody, iBodyLen, bHtml, false );
}
/// Public wrapper: build a JSON-typed reply, optionally headers-only (HEAD request).
void HttpBuildReplyHead ( CSphVector<BYTE> & dData, EHTTP_STATUS eCode, const char * sBody, int iBodyLen, bool bHeadReply )
{
	HttpBuildReply ( dData, eCode, sBody, iBodyLen, false, bHeadReply );
}
/// Build a JSON error reply: {"error": "<szError>"} with the given HTTP status.
void HttpErrorReply ( CSphVector<BYTE> & dData, EHTTP_STATUS eCode, const char * szError )
{
	JsonObj_c tBody;
	tBody.AddStr ( "error", szError );

	const CSphString sJson = tBody.AsString();
	HttpBuildReply ( dData, eCode, sJson.cstr(), sJson.Length(), false );
}
/// Primary and alternate URL names of one HTTP endpoint.
struct Endpoint_t
{
	const char* m_szName1;	// canonical endpoint name
	const char* m_szName2;	// legacy "json/..." alias, or nullptr
};

// endpoint table; indexed by (int)EHTTP_ENDPOINT, so the row order must match
// the EHTTP_ENDPOINT enumerator order (see StrToHttpEndpoint below)
static Endpoint_t g_dEndpoints[(size_t)EHTTP_ENDPOINT::TOTAL] =
{
	{ "index.html", nullptr },
	{ "sql", nullptr },
	{ "search", "json/search" },
	{ "index", "json/index" },
	{ "create", "json/create" },
	{ "insert", "json/insert" },
	{ "replace", "json/replace" },
	{ "update", "json/update" },
	{ "delete", "json/delete" },
	{ "bulk", "json/bulk" },
	{ "pq", "json/pq" },
	{ "cli", nullptr },
	{ "cli_json", nullptr },
	{ "_bulk", nullptr }
};
/// Resolve an endpoint string to its EHTTP_ENDPOINT value. PQ is special-cased
/// with a prefix match (it carries a sub-path); everything else needs an exact
/// match on the primary or alias name. Returns TOTAL when nothing matches.
EHTTP_ENDPOINT StrToHttpEndpoint ( const CSphString& sEndpoint ) noexcept
{
	const Endpoint_t& tPq = g_dEndpoints[(int)EHTTP_ENDPOINT::PQ];
	if ( sEndpoint.Begins ( tPq.m_szName1 ) || sEndpoint.Begins ( tPq.m_szName2 ) )
		return EHTTP_ENDPOINT::PQ;

	for ( int iEp = 0; iEp < (int)EHTTP_ENDPOINT::TOTAL; ++iEp )
	{
		const Endpoint_t& tEp = g_dEndpoints[iEp];
		if ( sEndpoint==tEp.m_szName1 || ( tEp.m_szName2 && sEndpoint==tEp.m_szName2 ) )
			return EHTTP_ENDPOINT ( iEp );
	}

	return EHTTP_ENDPOINT::TOTAL;
}
///////////////////////////////////////////////////////////////////////
/// Stream reader
/// True when either the underlying network buffer reports an error
/// or this stream has recorded one of its own.
bool CharStream_c::GetError() const
{
	if ( m_pIn && m_pIn->GetError() )
		return true;
	return !m_sError.IsEmpty();
}
/// Error text matching GetError(): the network buffer's message takes
/// precedence over the stream's own.
const CSphString & CharStream_c::GetErrorMessage() const
{
	const bool bNetError = m_pIn && m_pIn->GetError();
	return bNetError ? m_pIn->GetErrorMessage() : m_sError;
}
/// stub - returns feed string
class BlobStream_c final: public CharStream_c
{
	// non-owning view over the caller-supplied string; the caller must keep
	// the string alive for the lifetime of this stream
	Str_t m_sData;

public:
	BlobStream_c ( const CSphString & sData )
		: CharStream_c ( nullptr )
		, m_sData { FromStr ( sData ) }
	{}

	// first call hands out the whole blob; subsequent calls return an empty string
	Str_t Read() final
	{
		if ( m_bDone )
			return dEmptyStr;
		m_bDone = true;
		return m_sData;
	}

	// same as Read(), but also publishes a short (<=100 byte) preview of the data
	// as the current session description via myinfo::SetDescription()
	Str_t ReadAll() final
	{
		auto sData = Read();
		Str_t sDescr = { sData.first, Min ( sData.second, 100 ) };
		myinfo::SetDescription ( sDescr, sDescr.second );
		return sData;
	}
};
/// stream with known content length and no special massage over socket
class RawSocketStream_c final : public CharStream_c
{
	int m_iContentLength;				// bytes of request body still expected on the socket
	bool m_bTerminated = false;			// true once a temporary '\0' was planted in the input buffer
	BYTE m_uOldTerminator = 0;			// byte overwritten by that '\0'; restored in the dtor
	CSphVector<BYTE> m_dUnpacked;		// scratch buffer for gzip-decompressed content
	bool m_bCompressed = false;			// body arrives gzip-compressed

public:
	RawSocketStream_c ( AsyncNetInputBuffer_c * pIn, int iContentLength, bool bCompressed )
		: CharStream_c ( pIn )
		, m_iContentLength ( iContentLength )
		, m_bCompressed ( bCompressed )
	{
		assert ( pIn );
		// a zero-length body means there is nothing to read at all
		m_bDone = !m_iContentLength;
	}

	~RawSocketStream_c() final
	{
		// undo the temporary '\0' termination and release consumed bytes
		if ( m_bTerminated )
			m_pIn->Terminate ( 0, m_uOldTerminator );
		 m_pIn->DiscardProcessed ( 0 );
	}

	// incremental read: returns the next available piece of the body (decompressed
	// when needed); empty on completion or error (check GetError())
	Str_t Read() final
	{
		if ( m_bDone )
			return dEmptyStr;

		m_pIn->DiscardProcessed ( 0 );
		if ( !m_pIn->HasBytes() && m_pIn->ReadAny()<0 )
		{
			if ( !m_pIn->GetError() )
				m_sError.SetSprintf ( "failed to receive HTTP request (error='%s')", sphSockError() );
			m_bDone = true;
			return dEmptyStr;
		}

		auto iChunk = Min ( m_iContentLength, m_pIn->HasBytes() );
		m_iContentLength -= iChunk;
		m_bDone = !m_iContentLength;

		// Temporary write \0 at the end, since parser wants z-terminated buf
		if ( m_bDone )
		{
			m_uOldTerminator = m_pIn->Terminate ( iChunk, '\0' );
			m_bTerminated = true;
		}

		return Decompress ( m_pIn->PopTail ( iChunk ) );
	}

	// one-shot read of the entire remaining body
	Str_t ReadAll() final
	{
		if ( m_bDone )
			return dEmptyStr;

		// that is oneshot read - we sure, we're done
		m_bDone = true;
		if ( m_iContentLength && !m_pIn->ReadFrom ( m_iContentLength ) )
		{
			if ( !m_pIn->GetError() )
				m_sError.SetSprintf ( "failed to receive HTTP request (error='%s')", sphSockError() );
			return dEmptyStr;
		}

		m_uOldTerminator = m_pIn->Terminate ( m_iContentLength, '\0' );
		return Decompress ( m_pIn->PopTail ( m_iContentLength ) );
	}

	// pass-through when uncompressed; otherwise gzip-inflate into m_dUnpacked
	// (which stays alive until the next call, so the returned view is valid)
	Str_t Decompress ( const ByteBlob_t & tIn )
	{
		if ( !m_bCompressed )
			return B2S ( tIn );

		m_dUnpacked.Resize ( 0 );
		if ( !GzipDecompress ( tIn, m_dUnpacked, m_sError ) )
		{
			m_bDone = true;
			return dEmptyStr;
		}

		return Str_t ( m_dUnpacked );
	}
};
/// chunked stream - i.e. total content length is unknown
/// (Transfer-Encoding: chunked). Feeds raw socket data through http_parser,
/// which strips the chunk framing and hands back the decoded body pieces.
class ChunkedSocketStream_c final: public CharStream_c
{
	CSphVector<BYTE> m_dData;		// used only in ReadAll() call
	int m_iLastParsed;				// net buffer bytes consumed by the last parse; released by DiscardLast()
	bool m_bBodyDone;				// set when http_parser reports the message is complete
	CSphVector<Str_t> m_dBodies;	// decoded body pieces; they point INTO the net buffer
	http_parser_settings m_tParserSettings;
	http_parser* m_pParser;			// parser instance shared with the caller (not owned)

private:
	// callbacks
	// static C trampolines: http_parser keeps `this` in pParser->data
	static int cbParserBody ( http_parser* pParser, const char* sAt, size_t iLen )
	{
		assert ( pParser->data );
		auto pThis = static_cast<ChunkedSocketStream_c*> ( pParser->data );
		return pThis->ParserBody ( { sAt, (int)iLen } );
	}

	static int cbMessageComplete ( http_parser* pParser )
	{
		assert ( pParser->data );
		auto pThis = static_cast<ChunkedSocketStream_c*> ( pParser->data );
		return pThis->MessageComplete();
	}

	inline int MessageComplete()
	{
		HTTPINFO << "ChunkedSocketStream_c::MessageComplete";
		m_bBodyDone = true;
		return 0;
	}

	// collect a decoded body piece (skip empty ones)
	inline int ParserBody ( Str_t sData )
	{
		HTTPINFO << "ParserBody chunked str with " << sData.second << " bytes '" << Data2Log ( sData ) << "'";;
		if ( !IsEmpty ( sData ) )
			m_dBodies.Add ( sData );
		return 0;
	}

	// run sData through http_parser; decoded pieces land in m_dBodies
	void ParseBody ( ByteBlob_t sData )
	{
		HTTPINFO << "ParseBody chunked blob with " << sData.second << " bytes '" << Data2Log ( sData ) << "'";;
		m_iLastParsed = (int)http_parser_execute ( m_pParser, &m_tParserSettings, (const char*)sData.first, sData.second );
		if ( m_iLastParsed != sData.second )
		{
			// parser stopped mid-buffer: malformed chunk framing
			HTTPINFO << "ParseBody error: parsed " << m_iLastParsed << ", chunk " << sData.second;
			if ( !m_pIn->GetError() )
				m_sError = http_errno_description ( (http_errno)m_pParser->http_errno );
		}
	}

public:
	// takes over the pieces possibly already parsed while reading the headers
	ChunkedSocketStream_c ( AsyncNetInputBuffer_c * pIn, http_parser * pParser, bool bBodyDone, CSphVector<Str_t> dBodies, int iLastParsed )
		: CharStream_c ( pIn )
		, m_iLastParsed ( iLastParsed )
		, m_bBodyDone ( bBodyDone )
		, m_pParser ( pParser )
	{
		assert ( pIn );
		m_dBodies = std::move ( dBodies );
		http_parser_settings_init ( &m_tParserSettings );
		m_tParserSettings.on_body = cbParserBody;
		m_tParserSettings.on_message_complete = cbMessageComplete;
		m_pParser->data = this;
	}

	// release the net buffer bytes consumed by the last http_parser_execute
	void DiscardLast()
	{
		m_pIn->PopTail ( std::exchange ( m_iLastParsed, 0 ) );
		m_pIn->DiscardProcessed ( 0 );
	}

	~ChunkedSocketStream_c() final
	{
		DiscardLast();
	}

	// return the next decoded body piece, reading and parsing more socket
	// data whenever the queue of pieces runs dry
	Str_t Read() final
	{
		if ( m_bDone )
			return dEmptyStr;

		while ( m_dBodies.IsEmpty() )
		{
			if ( m_bBodyDone )
			{
				m_bDone = true;
				return dEmptyStr;
			}

			DiscardLast();
			if ( !m_pIn->HasBytes() )
			{
				switch ( m_pIn->ReadAny() )
				{
				case -1:	// socket error
				case 0:		// peer closed
					m_bDone = true;
					return dEmptyStr;
				default:
					break;
				}
			}
			ParseBody ( m_pIn->Tail() );
		}

		auto sResult = m_dBodies.First();
		m_dBodies.Remove ( 0 );
		if ( m_bBodyDone && m_dBodies.IsEmpty() )
		{
			m_bDone = true;
			// z-terminate the last piece in place; the byte right past it
			// belonged to the (already consumed) chunk framing
			const_cast<char&> ( sResult.first[sResult.second] ) = '\0';
		}
		return sResult;
	}

	// glue all the pieces together into m_dData and return the whole body
	Str_t ReadAll() final
	{
		auto sFirst = Read();
		if ( m_bDone )
			return sFirst; // single piece - no copying needed

		m_dData.Append ( sFirst );
		do
			m_dData.Append ( Read() );
		while ( !m_bDone );

		// z-terminate, but keep the reported length without the terminator
		m_dData.Add ( '\0' );
		m_dData.Resize ( m_dData.GetLength() - 1 );
		return m_dData;
	}
};
///////////////////////////////////////////////////////////////////////
// canonical (primary) name of the given endpoint
CSphString HttpEndpointToStr ( EHTTP_ENDPOINT eEndpoint )
{
	assert ( eEndpoint < EHTTP_ENDPOINT::TOTAL );
	return g_dEndpoints[(int)eEndpoint].m_szName1;
}
// wrap sReply into a complete HTTP/1.1 response: status line, Server,
// Content-Type (html or json) and Content-Length headers, then the payload
void HttpBuildReply ( CSphVector<BYTE> & dData, EHTTP_STATUS eCode, Str_t sReply, bool bHtml )
{
	const char * sContent = ( bHtml ? "text/html" : "application/json" );
	StringBuilder_c sHttp;
	sHttp.Sprintf ( "HTTP/1.1 %s\r\nServer: %s\r\nContent-Type: %s; charset=UTF-8\r\nContent-Length: %d\r\n\r\n", HttpGetStatusName ( eCode ), g_sStatusVersion.cstr(), sContent, sReply.second );

	dData.Reserve ( sHttp.GetLength() + sReply.second );
	dData.Append ( (Str_t)sHttp );
	dData.Append ( sReply );
}
// wire up the http_parser callbacks once; per-request state is (re)set by
// Reinit(), which is also called here for the first request
HttpRequestParser_c::HttpRequestParser_c()
{
	http_parser_settings_init ( &m_tParserSettings );
	m_tParserSettings.on_url = cbParserUrl;
	m_tParserSettings.on_header_field = cbParserHeaderField;
	m_tParserSettings.on_header_value = cbParserHeaderValue;
	m_tParserSettings.on_headers_complete = cbParseHeaderCompleted;
	m_tParserSettings.on_body = cbParserBody;
	m_tParserSettings.on_message_begin = cbMessageBegin;
	m_tParserSettings.on_message_complete = cbMessageComplete;
	m_tParserSettings.on_status = cbMessageStatus;

	Reinit();
}
// reset the parser to a pristine state before the next request; the parser
// object is reused across requests on a keep-alive connection
void HttpRequestParser_c::Reinit()
{
	HTTPINFO << "HttpRequestParser_c::Reinit()";
	http_parser_init ( &m_tParser, HTTP_REQUEST );
	m_sEndpoint = "";
	m_sCurField.Clear();
	m_sCurValue.Clear();
	m_hOptions.Reset();
	m_eType = HTTP_GET;
	m_sUrl.Clear();
	m_bHeaderDone = false;
	m_bBodyDone = false;
	m_dParsedBodies.Reset();
	m_iParsedBodyLength = 0;
	m_iLastParsed = 0;
	m_szError = nullptr;
	m_tParser.data = this; // callbacks recover `this` from here
	// NOTE(review): m_bKeepAlive is not reset here; it is recalculated in
	// ParseHeaderCompleted() for every request - confirm it cannot leak through
}
// feed a piece of the incoming request to http_parser.
// Returns true either when the whole header block has been parsed, or on a
// parse error (check Error() to distinguish); false means "need more data".
bool HttpRequestParser_c::ParseHeader ( ByteBlob_t sData )
{
	HTTPINFO << "ParseChunk with " << sData.second << " bytes '" << Data2Log ( sData ) << "'";
	m_iLastParsed = (int) http_parser_execute ( &m_tParser, &m_tParserSettings, (const char *)sData.first, sData.second );
	if ( m_iLastParsed != sData.second )
	{
		// parser stopped mid-buffer: malformed request
		if ( g_bLogBadHttpReq )
		{
			sphWarning ( "ParseChunk error: parsed %d, chunk %d, conn %d, %.*s", m_iLastParsed, sData.second, session::GetConnID(), sData.second, sData.first );
		} else
		{
			HTTPINFO << "ParseChunk error: parsed " << m_iLastParsed << ", chunk " << sData.second;
		}
		m_szError = http_errno_description ( (http_errno)m_tParser.http_errno );
		return true;
	}

	return m_bHeaderDone;
}
// total length of the body pieces collected so far
int HttpRequestParser_c::ParsedBodyLength() const
{
	return m_iParsedBodyLength;
}

// whether the client asked for an interim '100 Continue' before sending the body
bool HttpRequestParser_c::Expect100() const
{
	return m_hOptions.Exists ( "expect" ) && m_hOptions["expect"] == "100-continue";
}

bool HttpRequestParser_c::KeepAlive() const
{
	return m_bKeepAlive;
}

// nullptr when no parse error occurred
const char* HttpRequestParser_c::Error() const
{
	return m_szError;
}

bool HttpRequestParser_c::IsBuddyQuery () const
{
	return ::IsBuddyQuery ( m_hOptions );
}
/// decode a single hex digit; returns 0..15, or -1 for a non-hex char
inline int Char2Hex ( BYTE uChar )
{
	if ( uChar>='0' && uChar<='9' )
		return uChar - '0';
	if ( uChar>='a' && uChar<='f' )
		return uChar - 'a' + 10;
	if ( uChar>='A' && uChar<='F' )
		return uChar - 'A' + 10;
	return -1;
}

/// decode two hex digits (high nibble first); returns 0..255, or a negative
/// value if either char is not a hex digit.
/// FIX: the old version decoded the low nibble first and relied on the
/// accident that adding -1*16 for a bad high nibble stays negative; both
/// nibbles are now validated explicitly
inline int Chars2Hex ( const char* pSrc )
{
	int iHi = Char2Hex ( *pSrc );
	if ( iHi<0 )
		return -1;
	int iLo = Char2Hex ( *( pSrc + 1 ) );
	if ( iLo<0 )
		return -1;
	return iHi * 16 + iLo;
}
// decode %XX percent-escapes (and, depending on ePlus, '+' -> space) in
// place; decoding never grows the string, so the rewrite happens in the same
// buffer and sEntity.second shrinks accordingly
void UriPercentReplace ( Str_t & sEntity, Replace_e ePlus )
{
	if ( IsEmpty ( sEntity ) )
		return;

	const char* pSrc = sEntity.first;
	auto* pDst = const_cast<char*> ( pSrc );
	char cPlus = ((bool)ePlus) ? ' ' : '+'; // either decode '+' into space, or keep it verbatim
	auto* pEnd = pSrc + sEntity.second;
	while ( pSrc < pEnd )
	{
		// a '%' escape needs two more chars INSIDE the buffer. FIX: the old
		// check dereferenced *(pSrc+1)/*(pSrc+2), reading up to two bytes
		// past pEnd when the buffer is not z-terminated
		if ( *pSrc=='%' && pSrc+2<pEnd )
		{
			auto iCode = Chars2Hex ( pSrc + 1 );
			if ( iCode<0 )
			{
				// not a valid escape - copy the '%' verbatim
				*pDst++ = *pSrc++;
				continue;
			}
			pSrc += 3;
			*pDst++ = (char) iCode;
		} else
		{
			*pDst++ = ( *pSrc=='+' ? cPlus : *pSrc );
			pSrc++;
		}
	}
	sEntity.second = int ( pDst - sEntity.first );
}
// store the query body under the 'raw_query' option (skip empty bodies)
void StoreRawQuery ( OptionsHash_t& hOptions, CSphString sRawBody )
{
	if ( sRawBody.IsEmpty() )
		return;
	hOptions.Add ( std::move ( sRawBody ), "raw_query" );
}

// copy the query string, percent-decode the COPY in place (keeping '+'
// literal) and store it as 'raw_query'; the original buffer stays untouched
// so ParseList() can decode it independently
void DecodeAndStoreRawQuery ( OptionsHash_t& hOptions, const Str_t& sWholeData )
{
	if ( IsEmpty ( sWholeData ) )
		return;

	// store raw query
	CSphString sRawBody ( sWholeData ); // copy raw data, important!
	Str_t sRaw { sRawBody.cstr(), sWholeData.second }; // FromStr implies strlen(), but we don't need it
	UriPercentReplace ( sRaw, Replace_e::NoPlus ); // avoid +-decoding
	*const_cast<char*> ( sRaw.first + sRaw.second ) = '\0'; // decoding may shrink the string - re-terminate
	StoreRawQuery ( hOptions, std::move ( sRawBody ));
}
// split an url-encoded 'a=1&b=2&...' list into hOptions; names and values
// are percent-decoded in place, names are lowercased.
// NOTE(review): a token without '=' followed by '&' is stored under the name
// parsed so far (possibly empty), and a trailing bare token is dropped -
// confirm this matches the intended handling of valueless parameters
void HttpRequestParser_c::ParseList ( Str_t sData, OptionsHash_t & hOptions )
{
	HTTPINFO << "ParseList with " << sData.second << " bytes '" << Data2Log ( sData ) << "'";
	const char * sCur = sData.first;
	const char* sLast = sCur;
	const char * sEnd = sCur + sData.second;
	Str_t sName = dEmptyStr;
	for ( ; sCur<sEnd; ++sCur )
	{
		switch (*sCur)
		{
		case '=': // end of a name
		{
			sName = { sLast, int ( sCur - sLast ) };
			UriPercentReplace ( sName );
			sLast = sCur + 1;
			break;
		}
		case '&': // end of a value - flush the pair
		{
			Str_t sVal { sLast, int ( sCur - sLast ) };
			UriPercentReplace ( sVal );
			ToLower ( sName );
			hOptions.Add ( sVal, sName );
			sLast = sCur + 1;
			sName = dEmptyStr;
			break;
		}
		default:
			break;
		}
	}

	// flush the final pair (no trailing '&')
	if ( IsEmpty ( sName ) )
		return;

	Str_t sVal { sLast, int ( sCur - sLast ) };
	UriPercentReplace ( sVal );
	ToLower ( sName );
	hOptions.Add ( sVal, sName );
}
// the url may arrive in several pieces; accumulate here, parse in FinishParserUrl()
inline int HttpRequestParser_c::ParserUrl ( Str_t sData )
{
	HTTPINFO << "ParseUrl with " << sData.second << " bytes '" << sData << "'";
	m_sUrl << sData;
	return 0;
}

// split the accumulated url into the endpoint path, the decoded GET params
// and the full url; everything lands in m_hOptions
inline void HttpRequestParser_c::FinishParserUrl ()
{
	if ( m_sUrl.IsEmpty() )
		return;

	auto _ = AtScopeExit ( [this] { m_sUrl.Clear(); } );
	auto sData = (Str_t) m_sUrl;
	http_parser_url tUri;
	if ( http_parser_parse_url ( sData.first, sData.second, 0, &tUri ) )
		return; // malformed url - leave the options as they are

	DWORD uPath = ( 1UL<<UF_PATH );
	DWORD uQuery = ( 1UL<<UF_QUERY );

	if ( ( tUri.field_set & uPath )!=0 )
	{
		const char * sPath = sData.first + tUri.field_data[UF_PATH].off;
		int iPathLen = tUri.field_data[UF_PATH].len;
		if ( *sPath=='/' ) // strip the leading slash
		{
			++sPath;
			--iPathLen;
		}

		// URL should be split fully to point to proper endpoint
		m_sEndpoint.SetBinary ( sPath, iPathLen );
		// transfer endpoint for further parse
		m_hOptions.Add ( m_sEndpoint, "endpoint" );
	}

	if ( ( tUri.field_set & uQuery )!=0 )
	{
		Str_t sRawGetQuery { sData.first + tUri.field_data[UF_QUERY].off, tUri.field_data[UF_QUERY].len };
		// only GET requests keep the undecoded query under 'raw_query'
		if ( m_eType == HTTP_GET )
			DecodeAndStoreRawQuery ( m_hOptions, sRawGetQuery );
		ParseList ( sRawGetQuery, m_hOptions );
	}

	CSphString sFullURL;
	if ( ( tUri.field_set & uPath )!=0 && ( tUri.field_set & uQuery )!=0 )
	{
		// path + query: everything from the path start up to the query end
		const char * sStart = sData.first + tUri.field_data[UF_PATH].off;
		const char * sEnd = sData.first + tUri.field_data[UF_QUERY].off + tUri.field_data[UF_QUERY].len;
		sFullURL.SetBinary ( sStart, sEnd-sStart );
		m_hOptions.Add ( sFullURL, "full_url" );
	} else if ( ( tUri.field_set & uPath )!=0 )
	{
		const char * sPath = sData.first + tUri.field_data[UF_PATH].off;
		int iPathLen = tUri.field_data[UF_PATH].len;
		// URL should be split fully to point to proper endpoint
		sFullURL.SetBinary ( sPath, iPathLen );
		m_hOptions.Add ( sFullURL, "full_url" );
	}
}
// http_parser may deliver one header name (or value) in several pieces when
// it is split across network reads; the pieces are accumulated and the pair
// is flushed on the field->field transition or at headers-complete
inline int HttpRequestParser_c::ParserHeaderField ( Str_t sData )
{
	// a new field begins: flush the previous name/value pair, if complete.
	// While the value is still empty, this call is a continuation of the
	// same name and FinishParserKeyVal() is a no-op.
	FinishParserKeyVal();
	m_sCurField << sData;
	return 0;
}

inline int HttpRequestParser_c::ParserHeaderValue ( Str_t sData )
{
	m_sCurValue << sData;
	return 0;
}

inline void HttpRequestParser_c::FinishParserKeyVal()
{
	if ( m_sCurValue.IsEmpty() )
		return; // no complete pair accumulated yet

	CSphString sField = (CSphString)m_sCurField;
	sField.ToLower(); // header names are case-insensitive
	m_hOptions.Add ( (CSphString)m_sCurValue, sField );
	m_sCurField.Clear();
	m_sCurValue.Clear();
}
// collect a body piece; pieces that are contiguous in the underlying buffer
// are coalesced, keeping m_dParsedBodies short
inline int HttpRequestParser_c::ParserBody ( Str_t sData )
{
	HTTPINFO << "ParserBody parser with " << sData.second << " bytes '" << Data2Log ( sData ) << "'";
	if ( !m_dParsedBodies.IsEmpty() )
	{
		auto& sLast = m_dParsedBodies.Last();
		if ( sLast.first + sLast.second == sData.first ) // adjacent to the previous piece?
			sLast.second += sData.second;
		else
			m_dParsedBodies.Add ( sData );
	} else
		m_dParsedBodies.Add ( sData );
	m_iParsedBodyLength += sData.second;
	return 0;
}

inline int HttpRequestParser_c::MessageComplete ()
{
	HTTPINFO << "MessageComplete";
	m_bBodyDone = true;
	return 0;
}
// invoked once the whole header block is parsed; fixes connection-wide
// options before any body callbacks fire
inline int HttpRequestParser_c::ParseHeaderCompleted ()
{
	HTTPINFO << "ParseHeaderCompleted. Upgrade=" << (unsigned int)m_tParser.upgrade << ", length=" << (int64_t) m_tParser.content_length;

	// we're not support connection upgrade - so just reset upgrade flag, if detected.
	// rfc7540 section-3.2 (for http/2) says, we just should continue as if no 'upgrade' header was found
	m_tParser.upgrade = 0;

	// connection wide http options
	m_bKeepAlive = ( http_should_keep_alive ( &m_tParser ) != 0 );
	m_eType = (http_method)m_tParser.method;

	// flush the pending header pair and the accumulated url
	FinishParserKeyVal();
	FinishParserUrl();
	m_bHeaderDone = true;
	return 0;
}
// static C-style trampolines for http_parser: each recovers `this` from
// pParser->data and forwards to the corresponding member handler
int HttpRequestParser_c::cbParserUrl ( http_parser* pParser, const char* sAt, size_t iLen )
{
	assert ( pParser->data );
	auto pThis = static_cast<HttpRequestParser_c*> ( pParser->data );
	return pThis->ParserUrl ( { sAt, (int)iLen } );
}

int HttpRequestParser_c::cbParserHeaderField ( http_parser* pParser, const char* sAt, size_t iLen )
{
	assert ( pParser->data );
	auto pThis = static_cast<HttpRequestParser_c*> ( pParser->data );
	return pThis->ParserHeaderField ( { sAt, (int)iLen } );
}

int HttpRequestParser_c::cbParserHeaderValue ( http_parser* pParser, const char* sAt, size_t iLen )
{
	assert ( pParser->data );
	auto pThis = static_cast<HttpRequestParser_c*> ( pParser->data );
	return pThis->ParserHeaderValue ( { sAt, (int)iLen } );
}

int HttpRequestParser_c::cbParseHeaderCompleted ( http_parser* pParser )
{
	assert ( pParser->data );
	auto pThis = static_cast<HttpRequestParser_c*> ( pParser->data );
	return pThis->ParseHeaderCompleted ();
}

// log-only callback; nothing to track on message start
int HttpRequestParser_c::cbMessageBegin ( http_parser* pParser )
{
	HTTPINFO << "cbMessageBegin";
	return 0;
}

int HttpRequestParser_c::cbMessageComplete ( http_parser* pParser )
{
	assert ( pParser->data );
	auto pThis = static_cast<HttpRequestParser_c*> ( pParser->data );
	return pThis->MessageComplete();
}

// log-only callback; status lines appear in responses, not in requests
int HttpRequestParser_c::cbMessageStatus ( http_parser* pParser, const char* sAt, size_t iLen )
{
	HTTPINFO << "cbMessageStatus with '" << Str_t { sAt, (int)iLen } << "'";
	return 0;
}

int HttpRequestParser_c::cbParserBody ( http_parser* pParser, const char* sAt, size_t iLen )
{
	assert ( pParser->data );
	auto pThis = static_cast<HttpRequestParser_c*> ( pParser->data );
	return pThis->ParserBody ( { sAt, (int)iLen } );
}
// html stub served at the root endpoint; %s is expanded into the daemon version
static const char * g_sIndexPage =
R"index(<!DOCTYPE html>
<html>
<head>
<title>Manticore</title>
</head>
<body>
<h1>Manticore daemon</h1>
<p>%s</p>
</body>
</html>)index";

// build a complete '200 OK' html response with the greeting page
static void HttpHandlerIndexPage ( CSphVector<BYTE> & dData )
{
	StringBuilder_c sIndexPage;
	sIndexPage.Appendf ( g_sIndexPage, g_sStatusVersion.cstr() );
	HttpBuildReply ( dData, EHTTP_STATUS::_200, (Str_t)sIndexPage, true );
}
//////////////////////////////////////////////////////////////////////////
// forwards a json query to a remote agent, substituting the "table" value
// with the agent's own index list
class JsonRequestBuilder_c : public RequestBuilder_i
{
public:
	JsonRequestBuilder_c ( const char* szQuery, CSphString sEndpoint )
		: m_sEndpoint ( std::move ( sEndpoint ) )
		, m_tQuery ( szQuery ) // parses the query into a json object
	{
		// fixme: we can implement replacing indexes in a string (without parsing) if it becomes a performance issue
	}

	void BuildRequest ( const AgentConn_t & tAgent, ISphOutputBuffer & tOut ) const final
	{
		// replace "table" value in the json query
		// NOTE(review): m_tQuery is mutable and modified from a const method;
		// assumes per-agent requests are built sequentially, not concurrently - verify
		m_tQuery.DelItem ( "table" );
		m_tQuery.AddStr ( "table", tAgent.m_tDesc.m_sIndexes.cstr() );

		CSphString sRequest = m_tQuery.AsString();

		auto tWr = APIHeader ( tOut, SEARCHD_COMMAND_JSON, VER_COMMAND_JSON ); // API header
		tOut.SendString ( m_sEndpoint.cstr() );
		tOut.SendString ( sRequest.cstr() );
	}

private:
	CSphString			m_sEndpoint;
	mutable JsonObj_c	m_tQuery;
};
// parses an agent's reply to a forwarded json update/delete and accumulates
// the affected-rows and warnings counters into the caller-provided ints
class JsonReplyParser_c : public ReplyParser_i
{
public:
	JsonReplyParser_c ( int & iAffected, int & iWarnings )
		: m_iAffected ( iAffected )
		, m_iWarnings ( iWarnings )
	{}

	bool ParseReply ( MemInputBuffer_c & tReq, AgentConn_t & ) const final
	{
		CSphString sEndpoint = tReq.GetString();
		EHTTP_ENDPOINT eEndpoint = StrToHttpEndpoint ( sEndpoint );
		// only update/delete replies carry result stats we can parse
		if ( eEndpoint!=EHTTP_ENDPOINT::JSON_UPDATE && eEndpoint!=EHTTP_ENDPOINT::JSON_DELETE )
			return false;

		DWORD uLength = tReq.GetDword();
		CSphFixedVector<BYTE> dResult ( uLength+1 ); // +1 for the z-terminator
		tReq.GetBytes ( dResult.Begin(), (int)uLength );
		dResult[uLength] = '\0';

		return sphGetResultStats ( (const char *)dResult.Begin(), m_iAffected, m_iWarnings, eEndpoint==EHTTP_ENDPOINT::JSON_UPDATE );
	}

protected:
	int &	m_iAffected;
	int &	m_iWarnings;
};
// pick the query parser flavour: json (http) or plain (sphinxql) text
std::unique_ptr<QueryParser_i> CreateQueryParser( bool bJson )
{
	if ( !bJson )
		return sphCreatePlainQueryParser();
	return sphCreateJsonQueryParser();
}
// pick the request builder matching the statement origin (json vs sphinxql)
std::unique_ptr<RequestBuilder_i> CreateRequestBuilder ( Str_t sQuery, const SqlStmt_t & tStmt )
{
	if ( !tStmt.m_bJson )
		return std::make_unique<SphinxqlRequestBuilder_c> ( sQuery, tStmt );

	assert ( !tStmt.m_sEndpoint.IsEmpty() );
	return std::make_unique<JsonRequestBuilder_c> ( sQuery.first, tStmt.m_sEndpoint );
}
// pick the reply parser matching the request builder flavour
std::unique_ptr<ReplyParser_i> CreateReplyParser ( bool bJson, int & iUpdated, int & iWarnings )
{
	if ( !bJson )
		return std::make_unique<SphinxqlReplyParser_c> ( &iUpdated, &iWarnings );
	return std::make_unique<JsonReplyParser_c> ( iUpdated, iWarnings );
}
//////////////////////////////////////////////////////////////////////////
// collects the outcome of a sql statement executed on behalf of an http
// request: either the affected-rows counter, or an error message
class HttpErrorReporter_c final : public StmtErrorReporter_i
{
public:
	void Ok ( int iAffectedRows, const CSphString & /*sWarning*/, int64_t /*iLastInsertId*/ ) final { m_iAffected = iAffectedRows; }
	void Ok ( int iAffectedRows, int /*nWarnings*/ ) final { m_iAffected = iAffectedRows; }
	void ErrorEx ( EMYSQL_ERR eErr, const char * sError ) final;
	RowBuffer_i * GetBuffer() final { return nullptr; } // no row data over this channel

	bool IsError() const { return m_bError; }
	const char * GetError() const { return m_sError.cstr(); }
	int GetAffectedRows() const { return m_iAffected; }

private:
	bool		m_bError {false};
	CSphString	m_sError;
	int			m_iAffected {0};
};

// the mysql error code is irrelevant over http - only the text is kept
void HttpErrorReporter_c::ErrorEx ( EMYSQL_ERR /*iErr*/, const char * sError )
{
	m_bError = true;
	m_sError = sError;
}

StmtErrorReporter_i * CreateHttpErrorReporter()
{
	return new HttpErrorReporter_c();
}
//////////////////////////////////////////////////////////////////////////
// all the handlers for http queries
// pack sResult either as a complete HTTP response, or as a bare payload
void ReplyBuf ( Str_t sResult, EHTTP_STATUS eStatus, bool bNeedHttpResponse, CSphVector<BYTE> & dData )
{
	if ( !bNeedHttpResponse )
	{
		dData.Resize ( 0 );
		dData.Append ( sResult );
		return;
	}
	HttpBuildReply ( dData, eStatus, sResult, false );
}
// when false, the handler emits a bare payload; when true - a complete
// HTTP response with status line and headers
void HttpHandler_c::SetErrorFormat ( bool bNeedHttpResponse )
{
	m_bNeedHttpResponse = bNeedHttpResponse;
}

CSphVector<BYTE> & HttpHandler_c::GetResult()
{
	return m_dData;
}

const CSphString & HttpHandler_c::GetError () const
{
	return m_sError;
}

EHTTP_STATUS HttpHandler_c::GetStatusCode() const
{
	return m_eHttpCode;
}

// store the error text, then format the reply
void HttpHandler_c::ReportError ( const char * szError, EHTTP_STATUS eStatus )
{
	m_sError = szError;
	ReportError ( eStatus );
}

// format the stored m_sError as either a json error reply, or a bare string.
// NOTE(review): unlike FormatError(), the bare-payload branch here does not
// z-terminate m_dData - confirm the consumers rely on the length only
void HttpHandler_c::ReportError ( EHTTP_STATUS eStatus )
{
	m_eHttpCode = eStatus;
	if ( m_bNeedHttpResponse )
		sphHttpErrorReply ( m_dData, eStatus, m_sError.cstr() );
	else
	{
		m_dData.Resize ( m_sError.Length() );
		memcpy ( m_dData.Begin(), m_sError.cstr(), m_dData.GetLength() );
	}
}

// printf-style flavour of ReportError()
void HttpHandler_c::FormatError ( EHTTP_STATUS eStatus, const char * sError, ... )
{
	va_list ap;
	va_start ( ap, sError );
	m_sError.SetSprintfVa ( sError, ap );
	va_end ( ap );

	m_eHttpCode = eStatus;
	if ( m_bNeedHttpResponse )
		sphHttpErrorReply ( m_dData, eStatus, m_sError.cstr() );
	else
	{
		int iLen = m_sError.Length();
		m_dData.Resize ( iLen+1 );
		memcpy ( m_dData.Begin(), m_sError.cstr(), iLen );
		m_dData[iLen] = '\0';
	}
}
// structured (typed) error reply: json-encoded with error type and index name
void HttpHandler_c::ReportError ( const char * sError, HttpErrorType_e eType, EHTTP_STATUS eStatus, const char * sIndex )
{
	if ( sError ) // keep the previously stored text when sError is null
		m_sError = sError;
	m_eHttpCode = eStatus;
	const char * sErrorType = GetErrorTypeName ( eType );
	int iStatus = HttpGetStatusCodes ( eStatus );
	CSphString sReply = ( sErrorType ? JsonEncodeResultError ( m_sError, sErrorType, iStatus, sIndex ) : JsonEncodeResultError ( m_sError, iStatus ) );
	HttpBuildReplyHead ( GetResult(), eStatus, sReply.cstr(), sReply.Length(), false );
}

// BuildReply overloads: store a successful reply in the configured format
void HttpHandler_c::BuildReply ( const CSphString & sResult, EHTTP_STATUS eStatus )
{
	m_eHttpCode = eStatus;
	ReplyBuf ( FromStr ( sResult ), eStatus, m_bNeedHttpResponse, m_dData );
}

void HttpHandler_c::BuildReply ( const char* szResult, EHTTP_STATUS eStatus )
{
	m_eHttpCode = eStatus;
	ReplyBuf ( FromSz( szResult ), eStatus, m_bNeedHttpResponse, m_dData );
}

void HttpHandler_c::BuildReply ( const StringBuilder_c & sResult, EHTTP_STATUS eStatus )
{
	m_eHttpCode = eStatus;
	ReplyBuf ( (Str_t)sResult, eStatus, m_bNeedHttpResponse, m_dData );
}

void HttpHandler_c::BuildReply ( Str_t sResult, EHTTP_STATUS eStatus )
{
	m_eHttpCode = eStatus;
	ReplyBuf ( sResult, eStatus, m_bNeedHttpResponse, m_dData );
}

// check whether given served index is exist and has requested type;
// on failure a '500' error reply is formatted into m_dData
bool HttpHandler_c::CheckValid ( const ServedIndex_c* pServed, const CSphString& sIndex, IndexType_e eType )
{
	if ( !pServed )
	{
		FormatError ( EHTTP_STATUS::_500, "no such table '%s'", sIndex.cstr () );
		return false;
	}
	if ( pServed->m_eType!=eType )
	{
		FormatError ( EHTTP_STATUS::_500, "table '%s' is not %s", sIndex.cstr(), GetIndexTypeName ( eType ) );
		return false;
	}
	return true;
}
// map the json aggregation function onto the core aggregate enum;
// everything but min/max/sum/avg has no direct counterpart
static ESphAggrFunc GetAggr ( Aggr_e eAggrFunc )
{
	if ( eAggrFunc==Aggr_e::MIN )
		return SPH_AGGR_MIN;
	if ( eAggrFunc==Aggr_e::MAX )
		return SPH_AGGR_MAX;
	if ( eAggrFunc==Aggr_e::SUM )
		return SPH_AGGR_SUM;
	if ( eAggrFunc==Aggr_e::AVG )
		return SPH_AGGR_AVG;
	return SPH_AGGR_NONE;
}
// split the comma-separated column list sCol and append each column as a
// plain select-list item. With HAS_ATTRS, pAttrs must be set and is used to
// skip already-present columns and to record the added ones.
template<bool HAS_ATTRS>
void AddCompositeItems ( const CSphString & sCol, CSphVector<CSphQueryItem> & dItems, sph::StringSet * pAttrs )
{
	if_const ( HAS_ATTRS )
	{
		assert ( pAttrs );
	}

	StrVec_t dAttrs;
	sphSplit ( dAttrs, sCol.cstr(), "," );
	// FIX: the loop variable used to shadow the sCol argument
	for ( const CSphString & sAttr : dAttrs )
	{
		if_const ( HAS_ATTRS )
			if ( (*pAttrs)[sAttr] )
				continue;

		CSphQueryItem & tItem = dItems.Add();
		tItem.m_sExpr = sAttr;
		tItem.m_sAlias = sAttr;

		if_const ( HAS_ATTRS )
			(*pAttrs).Add ( sAttr );
	}
}
// wrap a parsed json query into a multi-query search handler: query #0 is
// the main ("facet head") query, plus one FACET-style query per aggregation
// bucket. tQuery is mutated in place while the per-bucket queries are set.
std::unique_ptr<PubSearchHandler_c> CreateMsearchHandler ( std::unique_ptr<QueryParser_i> pQueryParser, QueryType_e eQueryType, JsonQuery_c & tQuery )
{
	tQuery.m_pQueryParser = pQueryParser.get();

	int iQueries = ( 1 + tQuery.m_dAggs.GetLength() );
	std::unique_ptr<PubSearchHandler_c> pHandler = std::make_unique<PubSearchHandler_c> ( iQueries, std::move ( pQueryParser ), eQueryType, true );

	// no aggregations (or plain SQL flavour): a single query, nothing to set up
	if ( !tQuery.m_dAggs.GetLength() || eQueryType==QUERY_SQL )
	{
		pHandler->SetQuery ( 0, tQuery, nullptr );
		return pHandler;
	}

	// keep the original select items aside; m_dItems gets augmented below
	tQuery.m_dRefItems = tQuery.m_dItems;

	// FIXME!!! no need to add count for AggrFunc aggregates
	CSphQueryItem & tCountItem = tQuery.m_dItems.Add();
	tCountItem.m_sExpr = "count(*)";
	tCountItem.m_sAlias = "count(*)";

	sph::StringSet hAttrs;
	for ( const auto & tItem : tQuery.m_dItems )
		hAttrs.Add ( tItem.m_sAlias );

	// make sure every bucket's column/expression appears in the select list
	ARRAY_FOREACH ( i, tQuery.m_dAggs )
	{
		const JsonAggr_t & tBucket = tQuery.m_dAggs[i];

		// add only new items
		if ( hAttrs[tBucket.m_sCol] )
			continue;

		if ( tBucket.m_eAggrFunc==Aggr_e::COUNT )
			continue;

		if ( tBucket.m_eAggrFunc==Aggr_e::COMPOSITE )
		{
			AddCompositeItems<true> ( tBucket.m_sCol, tQuery.m_dItems, &hAttrs );
			continue;
		}

		CSphQueryItem & tItem = tQuery.m_dItems.Add();
		if ( tBucket.m_eAggrFunc!=Aggr_e::NONE )
		{
			tItem.m_sExpr = DumpAggr ( tBucket.m_sCol.cstr(), tBucket );
			tItem.m_sAlias = GetAggrName ( i, tBucket.m_sCol );
			tItem.m_eAggrFunc = GetAggr ( tBucket.m_eAggrFunc );
		} else
		{
			tItem.m_sExpr = tBucket.m_sCol;
			tItem.m_sAlias = tBucket.m_sCol;
			hAttrs.Add ( tBucket.m_sCol );
		}
	}

	// query #0: the main query
	tQuery.m_bFacetHead = true;
	pHandler->SetQuery ( 0, tQuery, nullptr );
	// remember the main query's paging; buckets may override it below
	int iRefLimit = tQuery.m_iLimit;
	int iRefOffset = tQuery.m_iOffset;

	// queries #1..N: one FACET-style query per aggregation bucket
	ARRAY_FOREACH ( i, tQuery.m_dAggs )
	{
		const JsonAggr_t & tBucket = tQuery.m_dAggs[i];

		// common to main query but flags, select list and ref items should uniq
		tQuery.m_eGroupFunc = SPH_GROUPBY_ATTR;

		// facet flags
		tQuery.m_bFacetHead = false;
		tQuery.m_bFacet = true;

		// select list to facet query
		tQuery.m_sSelect.SetSprintf ( "%s", tBucket.m_sCol.cstr() );

		// ref items to facet query
		tQuery.m_dRefItems.Resize ( 0 );
		switch ( tBucket.m_eAggrFunc )
		{
		// bucketing aggregates: group by the generated expression alias
		case Aggr_e::SIGNIFICANT:
		case Aggr_e::HISTOGRAM:
		case Aggr_e::DATE_HISTOGRAM:
		case Aggr_e::RANGE:
		case Aggr_e::DATE_RANGE:
		{
			CSphQueryItem & tItem = tQuery.m_dRefItems.Add();
			tItem.m_sExpr = DumpAggr ( tBucket.m_sCol.cstr(), tBucket );
			tItem.m_sAlias = GetAggrName ( i, tBucket.m_sCol );
		}
		break;

		case Aggr_e::COMPOSITE:
			AddCompositeItems<false> ( tBucket.m_sCol, tQuery.m_dRefItems, nullptr );
			break;

		case Aggr_e::COUNT:
			break;

		// plain aggregate functions over the column
		case Aggr_e::MIN:
		case Aggr_e::MAX:
		case Aggr_e::SUM:
		case Aggr_e::AVG:
		{
			CSphQueryItem & tItem = tQuery.m_dRefItems.Add();
			tItem.m_sExpr = DumpAggr ( tBucket.m_sCol.cstr(), tBucket );
			tItem.m_sAlias = GetAggrName ( i, tBucket.m_sCol );
			tItem.m_eAggrFunc = GetAggr ( tBucket.m_eAggrFunc );
		}
		break;

		// plain terms facet over the column
		default:
		{
			CSphQueryItem & tItem = tQuery.m_dRefItems.Add();
			tItem.m_sExpr = tBucket.m_sCol;
			tItem.m_sAlias = tBucket.m_sCol;
		}
		break;
		}

		// FIXME!!! no need to add count for AggrFunc aggregates
		CSphQueryItem & tAggCountItem = tQuery.m_dRefItems.Add();
		tAggCountItem.m_sExpr = "count(*)";
		tAggCountItem.m_sAlias = "count(*)";

		switch ( tBucket.m_eAggrFunc )
		{
		case Aggr_e::SIGNIFICANT:
		case Aggr_e::HISTOGRAM:
		case Aggr_e::DATE_HISTOGRAM:
		case Aggr_e::RANGE:
		case Aggr_e::DATE_RANGE:
			tQuery.m_sFacetBy = tQuery.m_sGroupBy = GetAggrName ( i, tBucket.m_sCol );
			break;

		// GroupBy \ FacetBy should be empty for explicit grouper
		case Aggr_e::COUNT:
		case Aggr_e::MIN:
		case Aggr_e::MAX:
		case Aggr_e::SUM:
		case Aggr_e::AVG:
			break;

		case Aggr_e::COMPOSITE:
		default:
			tQuery.m_sGroupBy = tBucket.m_sCol;
			tQuery.m_sFacetBy = tBucket.m_sCol;
			break;
		}

		tQuery.m_sOrderBy = "@weight desc";
		if ( tBucket.m_eAggrFunc==Aggr_e::COMPOSITE )
			tQuery.m_eGroupFunc = SPH_GROUPBY_MULTIPLE;

		// explicit bucket sort wins; otherwise pick a per-function default
		if ( tBucket.m_sSort.IsEmpty() )
		{
			switch ( tBucket.m_eAggrFunc )
			{
			case Aggr_e::SIGNIFICANT:
			case Aggr_e::HISTOGRAM:
			case Aggr_e::DATE_HISTOGRAM:
			case Aggr_e::RANGE:
			case Aggr_e::DATE_RANGE:
				tQuery.m_sGroupSortBy = "@groupby asc";
				break;

			case Aggr_e::COMPOSITE:
				tQuery.m_sGroupSortBy = "@weight desc";
				break;

			default:
				tQuery.m_sGroupSortBy = "@groupby desc";
				break;
			}
		} else
		{
			tQuery.m_sGroupSortBy = tBucket.m_sSort;
		}

		// aggregate and main query could have different sizes
		if ( tBucket.m_iSize )
		{
			tQuery.m_iLimit = tBucket.m_iSize;
			tQuery.m_iOffset = 0;
		} else
		{
			tQuery.m_iLimit = iRefLimit;
			tQuery.m_iOffset = iRefOffset;
		}

		pHandler->SetQuery ( i+1, tQuery, nullptr );
	}

	return pHandler;
}
// keeps a reference to the parsed http request options (headers + url params)
struct HttpOptionTrait_t
{
	const OptionsHash_t & m_tOptions;

	explicit HttpOptionTrait_t ( const OptionsHash_t & tOptions )
		: m_tOptions ( tOptions )
	{}
};
// common skeleton of the http search handlers: parse -> run -> encode;
// the flavour-specific parts are the PreParseQuery()/EncodeResult() hooks
class HttpSearchHandler_c : public HttpHandler_c, public HttpOptionTrait_t
{
public:
	bool Process () final
	{
		TRACE_CONN ( "conn", "HttpSearchHandler_c::Process" );
		CSphString sWarning;
		std::unique_ptr<QueryParser_i> pQueryParser = PreParseQuery();
		if ( !pQueryParser )
			return false; // the error reply was already formatted by PreParseQuery()

		int iQueries = ( 1 + m_tParsed.m_tQuery.m_dAggs.GetLength() );

		// buddy-originated queries are excluded from the query log
		if ( IsBuddyQuery ( m_tOptions ) )
			m_tParsed.m_tQuery.m_uDebugFlags |= QUERY_DEBUG_NO_LOG;

		std::unique_ptr<PubSearchHandler_c> tHandler = CreateMsearchHandler ( std::move ( pQueryParser ), m_eQueryType, m_tParsed.m_tQuery );
		SetStmt ( *tHandler );

		// attach a profile when either profiling or a query plan was requested
		QueryProfile_c tProfile;
		tProfile.m_eNeedPlan = (PLAN_FLAVOUR)m_tParsed.m_iPlan;
		tProfile.m_bNeedProfile = m_tParsed.m_bProfile;
		bool bNeedProfile = m_tParsed.m_bProfile || ( m_tParsed.m_iPlan != 0 );
		if ( bNeedProfile )
			tHandler->SetProfile ( &tProfile );

		// search
		tHandler->RunQueries();

		if ( bNeedProfile )
			tProfile.Stop();

		AggrResult_t * pRes = tHandler->GetResult ( 0 );
		if ( !pRes->m_sError.IsEmpty() )
		{
			ReportError ( pRes->m_sError.cstr(), EHTTP_STATUS::_500 );
			return false;
		}

		// fixme: handle more than one warning at once?
		if ( pRes->m_sWarning.IsEmpty() && !m_tParsed.m_sWarning.IsEmpty() )
			pRes->m_sWarning = m_tParsed.m_sWarning;

		// result #0 is the main query; #1..N are the per-aggregation results
		CSphFixedVector<AggrResult_t *> dAggsRes ( iQueries );
		dAggsRes[0] = tHandler->GetResult ( 0 );
		ARRAY_FOREACH ( i,m_tParsed.m_tQuery.m_dAggs )
			dAggsRes[i+1] = tHandler->GetResult ( i+1 );

		CSphString sResult = EncodeResult ( dAggsRes, bNeedProfile ? &tProfile : nullptr );
		BuildReply ( sResult, EHTTP_STATUS::_200 );

		return true;
	}

	explicit HttpSearchHandler_c ( const OptionsHash_t & tOptions )
		: HttpOptionTrait_t ( tOptions )
	{}

protected:
	QueryType_e			m_eQueryType {QUERY_SQL};
	ParsedJsonQuery_t	m_tParsed;

	// parse the request into m_tParsed; nullptr (with reported error) on failure
	virtual std::unique_ptr<QueryParser_i> PreParseQuery() = 0;
	// serialize the results into a flavour-specific reply
	virtual CSphString EncodeResult ( const VecTraits_T<AggrResult_t *> & dRes, QueryProfile_c * pProfile ) = 0;
	virtual void SetStmt ( PubSearchHandler_c & tHandler ) {};
};
// rebuild the aggregations list from the FACET statements' ref items, so
// that a multi-statement FACET SELECT goes through the same multi-query
// pipeline as json aggregations
static void AddAggs ( const VecTraits_T<SqlStmt_t> & dStmt, JsonQuery_c & tQuery )
{
	assert ( dStmt.GetLength()>1 && dStmt[0].m_tQuery.m_bFacetHead );
	tQuery.m_dAggs.Reserve ( dStmt.GetLength()-1 );

	// statement #0 is the facet head; each following one defines a bucket
	for ( int i=1; i<dStmt.GetLength(); i++ )
	{
		const CSphQuery & tRef = dStmt[i].m_tQuery;
		assert ( tRef.m_dRefItems.GetLength() );

		JsonAggr_t & tBucket = tQuery.m_dAggs.Add();
		tBucket.m_sBucketName = tRef.m_dRefItems[0].m_sExpr;
		tBucket.m_sCol = tRef.m_dRefItems[0].m_sAlias;
	}
}
// handler of the sql-over-http endpoint: takes a SELECT (optionally with
// FACET clauses) from the 'query' option and replies with json
class HttpSearchHandler_SQL_c final: public HttpSearchHandler_c
{
public:
	explicit HttpSearchHandler_SQL_c ( const OptionsHash_t & tOptions )
		: HttpSearchHandler_c ( tOptions )
	{}

protected:
	CSphVector<SqlStmt_t> m_dStmt;

	std::unique_ptr<QueryParser_i> PreParseQuery() final
	{
		const CSphString * pRawQl = m_tOptions ( "query" );
		if ( !pRawQl || pRawQl->IsEmpty() )
		{
			ReportError ( "query missing", EHTTP_STATUS::_400 );
			return nullptr;
		}

		if ( !sphParseSqlQuery ( FromStr ( *pRawQl ), m_dStmt, m_sError, SPH_COLLATION_DEFAULT ) )
		{
			ReportError ( EHTTP_STATUS::_400 );
			return nullptr;
		}

		// statement #0 becomes the main query
		( (CSphQuery &) m_tParsed.m_tQuery ) = m_dStmt[0].m_tQuery;

		// multiple statements are allowed as a FACET group of SELECTs only
		bool bFacet = ( m_dStmt.GetLength()>1 );
		for ( const auto & tStmt : m_dStmt )
		{
			// should be all FACET in case of multiple queries
			bFacet &= ( tStmt.m_tQuery.m_bFacet || tStmt.m_tQuery.m_bFacetHead );

			if ( tStmt.m_eStmt!=STMT_SELECT )
			{
				ReportError ( "only SELECT queries are supported", EHTTP_STATUS::_501 );
				return nullptr;
			}
		}

		if ( m_dStmt.GetLength()>1 && !bFacet )
		{
			ReportError ( "only FACET multiple queries supported", EHTTP_STATUS::_501 );
			return nullptr;
		}

		if ( bFacet )
			AddAggs ( m_dStmt, m_tParsed.m_tQuery );

		m_eQueryType = QUERY_SQL;

		return sphCreatePlainQueryParser();
	}

	CSphString EncodeResult ( const VecTraits_T<AggrResult_t *> & dRes, QueryProfile_c * pProfile ) final
	{
		return sphEncodeResultJson ( dRes, m_tParsed.m_tQuery, pProfile, ResultSetFormat_e::MntSearch );
	}

	void SetStmt ( PubSearchHandler_c & tHandler ) final
	{
		// statement #0 goes as the head; the rest are the FACET queries
		tHandler.SetStmt ( m_dStmt[0] );
		for ( int i=1; i<m_dStmt.GetLength(); i++ )
			tHandler.SetQuery ( i, m_dStmt[i].m_tQuery, nullptr );
	}
};
typedef std::pair<CSphString,MysqlColumnType_e> ColumnNameType_t;
// mysql column type -> human-readable name; GetMysqlTypeByName is the inverse
static const char * GetMysqlTypeName ( MysqlColumnType_e eType )
{
	switch ( eType )
	{
	case MYSQL_COL_DECIMAL:		return "decimal";
	case MYSQL_COL_LONG:		return "long";
	case MYSQL_COL_FLOAT:		return "float";
	case MYSQL_COL_DOUBLE:		return "double";
	case MYSQL_COL_LONGLONG:	return "long long";
	case MYSQL_COL_STRING:		return "string";
	default:					return "unknown";
	} // FIX: dropped the stray ';' (empty statement) that followed the switch
}
// inverse of GetMysqlTypeName; asserts (and falls back to the string type)
// when the name is unknown
static MysqlColumnType_e GetMysqlTypeByName ( const CSphString& sType )
{
	struct NameType_t { const char* m_szName; MysqlColumnType_e m_eType; };
	const NameType_t dKnown[] =
	{
		{ "decimal",	MYSQL_COL_DECIMAL },
		{ "long",		MYSQL_COL_LONG },
		{ "float",		MYSQL_COL_FLOAT },
		{ "double",		MYSQL_COL_DOUBLE },
		{ "long long",	MYSQL_COL_LONGLONG },
		{ "string",		MYSQL_COL_STRING },
	};

	for ( const auto& tPair : dKnown )
		if ( sType==tPair.m_szName )
			return tPair.m_eType;

	assert (false && "Unknown column");
	return MYSQL_COL_STRING;
}
// serialize the mysql column type name into the json output
JsonEscapedBuilder& operator<< ( JsonEscapedBuilder& tOut, MysqlColumnType_e eType )
{
	tOut.FixupSpacedAndAppendEscaped ( GetMysqlTypeName ( eType ) );
	return tOut;
}

const StrBlock_t dJsonObjCustom { { ",\n", 2 }, { "[", 1 }, { "]", 1 } }; // json object with custom formatting
// RowBuffer_i implementation that renders a result set as JSON instead of the
// MySQL wire protocol. Produces the shape documented in the sample below:
// [{ "columns":[...], "data":[...], "total":N, "error":"...", "warning":"..." }]
// Column headers collected via HeadColumn() are reused to name each data cell.
class JsonRowBuffer_c : public RowBuffer_i
{
public:
	JsonRowBuffer_c()
	{
		// open the outer array of result sets (custom comma/bracket formatting)
		m_dBuf.StartBlock ( dJsonObjCustom );
	}

	// Each Put* emits the current column's name (from the collected header),
	// then the value, and advances the column cursor via AddDataColumn().
	void PutFloatAsString ( float fVal, const char * ) override
	{
		AddDataColumn();
		m_dBuf << fVal;
	}

	void PutDoubleAsString ( double fVal, const char * ) override
	{
		AddDataColumn();
		m_dBuf << fVal;
	}

	void PutNumAsString ( int64_t iVal ) override
	{
		AddDataColumn();
		m_dBuf << iVal;
	}

	void PutNumAsString ( uint64_t uVal ) override
	{
		AddDataColumn();
		m_dBuf << uVal;
	}

	void PutNumAsString ( int iVal ) override
	{
		AddDataColumn();
		m_dBuf << iVal;
	}

	void PutNumAsString ( DWORD uVal ) override
	{
		AddDataColumn();
		m_dBuf << uVal;
	}

	// blob is emitted as an escaped JSON string
	void PutArray ( const ByteBlob_t& dBlob, bool ) override
	{
		AddDataColumn();
		m_dBuf.FixupSpacedAndAppendEscaped ( (const char*)dBlob.first, dBlob.second );
	}

	void PutString ( Str_t sMsg ) override
	{
		PutArray ( S2B ( sMsg ), false );
	}

	// microseconds are serialized as a plain number (no unit conversion)
	void PutMicrosec ( int64_t iUsec ) override
	{
		PutNumAsString ( iUsec );
	}

	void PutNULL() override
	{
		AddDataColumn();
		m_dBuf << "null";
	}

	// finish the current row object and start the next one
	bool Commit() override
	{
		m_dBuf.FinishBlock ( false ); // finish previous item
		m_dBuf.ObjectBlock(); // start new item
		++m_iTotalRows;
		m_iCol = 0;
		return true;
	}

	// close the rows and the result-set object, appending total/error/warning
	void Eof ( bool bMoreResults, int iWarns, const char* ) override
	{
		m_dBuf.FinishBlock ( true ); // last doc, allow empty
		m_dBuf.FinishBlock ( false ); // docs section
		DataFinish ( m_iTotalRows, nullptr, nullptr );
		m_dBuf.FinishBlock ( false ); // root object
	}

	// emit an error-only result set and remember the error for the caller
	void Error ( const char * szError, EMYSQL_ERR ) override
	{
		auto _ = m_dBuf.Object ( false );
		DataFinish ( 0, szError, nullptr );
		m_bError = true;
		m_sError = szError;
	}

	// emit a rowless OK result set (affected rows go into "total")
	void Ok ( int iAffectedRows, int iWarns, const char * sMessage, bool bMoreResults, int64_t iLastInsertId ) override
	{
		auto _ = m_dBuf.Object ( false );
		DataFinish ( iAffectedRows, nullptr, sMessage );
	}

	// open the result-set object and its "columns" array
	void HeadBegin () override
	{
		m_iTotalRows = 0;
		m_dBuf.ObjectWBlock();
		m_dBuf.Named ( "columns" );
		m_dBuf.ArrayBlock();
	}

	// close "columns", open "data" and the first (possibly empty) row object
	bool HeadEnd ( bool , int ) override
	{
		m_dBuf.FinishBlock(false);
		m_dBuf.Named ( "data" );
		m_dBuf.ArrayWBlock();
		m_dBuf.ObjectBlock();
		return true;
	}

	// emit one column descriptor, e.g. {"id":{"type":"long long"}}, and record
	// the (escaped) name for later use by AddDataColumn()
	void HeadColumn ( const char * szName, MysqlColumnType_e eType ) override
	{
		JsonEscapedBuilder sEscapedName;
		sEscapedName.FixupSpacedAndAppendEscaped ( szName );
		ColumnNameType_t tCol { (CSphString)sEscapedName, eType };
		auto _ = m_dBuf.Object(false);
		m_dBuf.AppendName ( tCol.first.cstr(), false );
		auto tTypeBlock = m_dBuf.Object(false);
		m_dBuf.NamedVal ( "type", eType );
		m_dColumns.Add ( tCol );
	}

	// raw byte output is meaningless for JSON; intentionally a no-op
	void Add ( BYTE ) override {}

	// close all open blocks and expose the accumulated JSON
	const JsonEscapedBuilder & Finish()
	{
		m_dBuf.FinishBlocks();
		return m_dBuf;
	}

private:
	JsonEscapedBuilder m_dBuf;               // accumulated JSON output
	CSphVector<ColumnNameType_t> m_dColumns; // header: escaped name + type per column
	int m_iTotalRows = 0;                    // committed rows in current result set
	int m_iCol = 0;                          // current column cursor within a row

	// write the current column's name and advance the cursor
	void AddDataColumn()
	{
		m_dBuf.AppendName ( m_dColumns[m_iCol].first.cstr(), false );
		++m_iCol;
	}

	// append the trailing total/error/warning triple and reset per-set state
	void DataFinish ( int iTotal, const char* szError, const char* szWarning )
	{
		m_dBuf.NamedVal ( "total", iTotal );
		m_dBuf.NamedString ( "error", szError );
		m_dBuf.NamedString ( "warning", szWarning );
		m_iCol = 0;
		m_dColumns.Reset();
	}
};
/* Below is typical answer sent back by sql endpoint query mode=raw
[{
"columns":[{"id":{"type":"long long"}},{"proto":{"type":"string"}},{"state":{"type":"string"}},{"host":{"type":"string"}},{"connid":{"type":"long long"}},{"killed":{"type":"string"}},{"last cmd":{"type":"string"}}],
"data":[
{"id":2,"proto":"http","state":"query","host":"127.0.0.1:50787","connid":9,"killed":"0","last cmd":"select"},
{"id":1,"proto":"mysql,ssl","state":"query","host":"127.0.0.1:50514","connid":1,"killed":"0","last cmd":"show queries"}
],
"total":2,
"error":"",
"warning":""
}]
*/
// Convert a JSON dataset (the format produced by JsonRowBuffer_c, see the
// sample answer above) back into a row buffer, e.g. to replay a buddy reply
// over another protocol.
// tRoot  - array of result-set objects {columns, data, total, error, warning}
// sStmt  - original statement text (used only for error logging)
// tOut   - destination row buffer; errors are reported into it and stop conversion
void ConvertJsonDataset ( const JsonObj_c & tRoot, const char * sStmt, RowBuffer_i & tOut )
{
	assert ( tRoot.IsArray() );
	int iItem = 0;
	int iItemsCount = tRoot.Size();
	CSphString sParseError;
	for ( const auto & tItem : tRoot )
	{
		int iTotal = 0;
		CSphString sError, sWarning;
		if ( !tItem.FetchIntItem ( iTotal, "total", sParseError, true ) )
		{
			tOut.Error ( sParseError.cstr() );
			break;
		}
		if ( !tItem.FetchStrItem ( sError, "error", sParseError, true ) )
		{
			tOut.Error ( sParseError.cstr() );
			break;
		}
		if ( !tItem.FetchStrItem ( sWarning, "warning", sParseError, true ) )
		{
			tOut.Error ( sParseError.cstr() );
			break;
		}

		// an error embedded in the dataset is propagated to the client and session meta
		if ( !sError.IsEmpty() )
		{
			LogSphinxqlError ( sStmt, FromStr ( sError ) );
			session::GetClientSession()->m_sError = sError;
			session::GetClientSession()->m_tLastMeta.m_sError = sError;
			tOut.Error ( sError.cstr() );
			break;
		}

		if ( !iItem ) // only zero result set sets meta
		{
			session::GetClientSession()->m_tLastMeta.m_iTotalMatches = iTotal;
			session::GetClientSession()->m_tLastMeta.m_sWarning = sWarning;
		}

		using ColType_t = std::pair<CSphString, MysqlColumnType_e>;
		CSphVector<ColType_t> dSqlColumns;
		assert ( tItem.IsObj() );
		JsonObj_c tColumnsNode = tItem.GetArrayItem ( "columns", sParseError, true );
		for ( const auto & tColumnNode : tColumnsNode )
		{
			assert ( tColumnNode.IsObj() ); // like {"id":{"type":"long long"}}
			for ( const auto & tColumn : tColumnNode )
			{
				CSphString sType;
				if ( !tColumn.FetchStrItem ( sType, "type", sParseError, false ) )
				{
					// FIX: was a bare 'return' that silently swallowed the parse
					// error; report it like every other parse-failure path does
					tOut.Error ( sParseError.cstr() );
					return;
				}
				auto eType = GetMysqlTypeByName ( sType );
				dSqlColumns.Add ( { tColumn.Name(), eType } );
			}
		}

		// fill headers
		if ( !dSqlColumns.IsEmpty() )
		{
			tOut.HeadBegin ();
			dSqlColumns.for_each ( [&] ( const auto& tColumn ) { tOut.HeadColumn ( tColumn.first.cstr(), tColumn.second ); } );
			tOut.HeadEnd();
		} else
		{
			// just simple OK reply without table
			tOut.Ok ( iTotal, ( sWarning.IsEmpty() ? 0 : 1 ) );
			break;
		}

		JsonObj_c tDataNodes = tItem.GetItem ( "data" );
		for ( const auto & tDataRow : tDataNodes )
		{
			assert ( tDataRow.IsObj() ); // like {"id":2,"proto":"http","state":"query","host":"127.0.0.1:50787","connid":9,"killed":"0","last cmd":"select"}
			for ( const auto & tDataCol : tDataRow )
			{
				if ( tDataCol.IsInt () )
					tOut.PutNumAsString ( tDataCol.IntVal() );
				else if ( tDataCol.IsDbl () )
					tOut.PutDoubleAsString ( tDataCol.DblVal() );
				else
					tOut.PutString ( tDataCol.StrVal() );
			}
			if ( !tOut.Commit() )
				return;
		}

		tOut.Eof ( iItem+1!=iItemsCount, ( sWarning.IsEmpty() ? 0 : 1 ) );
		iItem++;
	}
}
// Handler for raw SQL over HTTP (/sql?mode=raw, /cli): executes the query via
// the session and returns the result set as JSON (see JsonRowBuffer_c format).
class HttpRawSqlHandler_c final: public HttpHandler_c, public HttpOptionTrait_t
{
	Str_t m_sQuery; // raw SQL text to execute

public:
	explicit HttpRawSqlHandler_c ( Str_t sQuery, const OptionsHash_t & tOptions )
		: HttpOptionTrait_t ( tOptions )
		, m_sQuery ( sQuery )
	{}

	bool Process () final
	{
		TRACE_CONN ( "conn", "HttpRawSqlHandler_c::Process" );
		if ( IsEmpty ( m_sQuery ) )
		{
			ReportError ( "query missing", EHTTP_STATUS::_400 );
			return false;
		}

		// queries originating from buddy are excluded from query logging
		if ( IsBuddyQuery ( m_tOptions ) )
			session::SetQueryDisableLog();

		JsonRowBuffer_c tOut;
		session::Execute ( m_sQuery, tOut );
		if ( tOut.IsError() )
		{
			ReportError ( tOut.GetError().scstr(), EHTTP_STATUS::_500 );
			return false;
		}
		BuildReply ( tOut.Finish(), EHTTP_STATUS::_200 );
		return true;
	}
};
// Handler for the JSON /search endpoint: parses the JSON query body and
// encodes results in the Manticore JSON result format.
class HttpHandler_JsonSearch_c : public HttpSearchHandler_c
{
	Str_t m_sQuery; // raw JSON request body

public:
	explicit HttpHandler_JsonSearch_c ( Str_t sQuery, const OptionsHash_t & tOptions )
		: HttpSearchHandler_c ( tOptions )
		, m_sQuery ( sQuery )
	{}

	std::unique_ptr<QueryParser_i> PreParseQuery() override
	{
		// TODO!!! add parsing collation from the query
		m_tParsed.m_tQuery.m_eCollation = session::GetCollation();
		if ( !sphParseJsonQuery ( m_sQuery, m_tParsed ) )
		{
			// parse error text comes from the thread-local message slot
			ReportError ( TlsMsg::szError(), EHTTP_STATUS::_400 );
			return nullptr;
		}

		m_eQueryType = QUERY_JSON;
		return sphCreateJsonQueryParser();
	}

protected:
	CSphString EncodeResult ( const VecTraits_T<AggrResult_t *> & dRes, QueryProfile_c * pProfile ) override
	{
		return sphEncodeResultJson ( dRes, m_tParsed.m_tQuery, pProfile, ResultSetFormat_e::MntSearch );
	}
};
// Mixin with transaction helpers shared by the bulk handlers: begins a txn on
// a mutable index and commits/rolls it back, tracking insert/update counters
// for the final reply.
class HttpJsonTxnTraits_c
{
protected:
	HttpJsonTxnTraits_c() = default;
	explicit HttpJsonTxnTraits_c ( ResultSetFormat_e eFormat )
		: m_eFormat ( eFormat )
	{}

	void ProcessBegin ( const CSphString& sIndex )
	{
		// for now - only local mutable indexes are suitable
		{
			auto pIndex = GetServed ( sIndex );
			if ( !ServedDesc_t::IsMutable ( pIndex ) )
				return;
			// NOTE(review): on this early return m_iInserts/m_iUpdates are NOT
			// reset — presumably fine because no txn starts, but worth confirming
		}
		HttpErrorReporter_c tReporter;
		sphHandleMysqlBegin ( tReporter, FromStr (sIndex) );
		m_iInserts = 0;
		m_iUpdates = 0;
	}

	// Commit (or roll back on failure) the current txn and encode the outcome:
	// either an error object, or a result carrying the last doc id plus the
	// accumulated insert/delete/update counters.
	bool ProcessCommitRollback ( Str_t sIndex, DocID_t tDocId, JsonObj_c & tResult, CSphString & sError ) const
	{
		HttpErrorReporter_c tReporter;
		sphHandleMysqlCommitRollback ( tReporter, sIndex, true );

		if ( tReporter.IsError() )
		{
			sError = tReporter.GetError();
			tResult = sphEncodeInsertErrorJson ( sIndex.first, sError.cstr(), m_eFormat );
		} else
		{
			auto iDeletes = tReporter.GetAffectedRows();
			auto dLastIds = session::LastIds();
			if ( !dLastIds.IsEmpty() )
				tDocId = dLastIds[0];
			tResult = sphEncodeTxnResultJson ( sIndex.first, tDocId, m_iInserts, iDeletes, m_iUpdates, m_eFormat );
		}
		return !tReporter.IsError();
	}

	int m_iInserts = 0; // inserts done within the current txn
	int m_iUpdates = 0; // updated rows within the current txn
	const ResultSetFormat_e m_eFormat = ResultSetFormat_e::MntSearch; // reply dialect (Manticore or ES)
};
// Execute an INSERT/REPLACE statement and encode the outcome into tResult.
// On success tDocId is replaced with the first auto-assigned id, if any.
// Returns false (with sError set) when the statement failed.
static bool ProcessInsert ( SqlStmt_t & tStmt, DocID_t tDocId, JsonObj_c & tResult, CSphString & sError, ResultSetFormat_e eFormat )
{
	HttpErrorReporter_c tReporter;
	sphHandleMysqlInsert ( tReporter, tStmt );

	if ( tReporter.IsError() )
	{
		sError = tReporter.GetError();
		tResult = sphEncodeInsertErrorJson ( tStmt.m_sIndex.cstr(), sError.cstr(), eFormat );
		return false;
	}

	auto dLastIds = session::LastIds();
	if ( !dLastIds.IsEmpty() )
		tDocId = dLastIds[0];

	const bool bReplace = ( tStmt.m_eStmt==STMT_REPLACE );
	tResult = sphEncodeInsertResultJson ( tStmt.m_sIndex.cstr(), bReplace, tDocId, eFormat );
	return true;
}
// Execute a DELETE statement and encode the outcome into tResult.
// Returns false (with sError set) when the statement failed.
static bool ProcessDelete ( Str_t sRawRequest, const SqlStmt_t& tStmt, DocID_t tDocId, JsonObj_c & tResult, CSphString & sError, ResultSetFormat_e eFormat )
{
	HttpErrorReporter_c tReporter;
	sphHandleMysqlDelete ( tReporter, tStmt, std::move ( sRawRequest ) );

	if ( tReporter.IsError() )
	{
		sError = tReporter.GetError();
		tResult = sphEncodeInsertErrorJson ( tStmt.m_sIndex.cstr(), sError.cstr(), eFormat );
		return false;
	}

	tResult = sphEncodeDeleteResultJson ( tStmt.m_sIndex.cstr(), tDocId, tReporter.GetAffectedRows(), eFormat );
	return true;
}
// Handler for /insert and /replace (and /index) endpoints: parses a single
// JSON document statement and executes it.
class HttpHandler_JsonInsert_c final : public HttpHandler_c
{
	Str_t m_sQuery;  // raw JSON request body
	bool m_bReplace; // true for replace-semantics endpoints, false for plain insert

public:
	HttpHandler_JsonInsert_c ( Str_t sQuery, bool bReplace )
		: m_sQuery ( sQuery )
		, m_bReplace ( bReplace )
	{}

	bool Process () final
	{
		TRACE_CONN ( "conn", "HttpHandler_JsonInsert_c::Process" );
		SqlStmt_t tStmt;
		DocID_t tDocId = 0;
		if ( !sphParseJsonInsert ( m_sQuery.first, tStmt, tDocId, m_bReplace, m_sError ) )
		{
			ReportError ( nullptr, HttpErrorType_e::Parse, EHTTP_STATUS::_400, tStmt.m_sIndex.cstr() );
			return false;
		}

		tStmt.m_sEndpoint = HttpEndpointToStr ( m_bReplace ? EHTTP_ENDPOINT::JSON_REPLACE : EHTTP_ENDPOINT::JSON_INSERT );
		JsonObj_c tResult = JsonNull;
		bool bResult = ProcessInsert ( tStmt, tDocId, tResult, m_sError, ResultSetFormat_e::MntSearch );

		if ( bResult )
			BuildReply ( tResult.AsString(), EHTTP_STATUS::_200 ); // guarded by bResult, so the former '? _200 : _409' ternary was dead code
		else
			ReportError ( nullptr, HttpErrorType_e::ActionRequestValidation, EHTTP_STATUS::_409, tStmt.m_sIndex.cstr() );

		return bResult;
	}
};
// Mixin with the shared UPDATE execution helper; remembers how many rows the
// last update touched so bulk handlers can accumulate totals.
class HttpJsonUpdateTraits_c
{
	int m_iLastUpdated = 0; // affected rows of the most recent ProcessUpdate call

protected:
	HttpJsonUpdateTraits_c() = default;
	explicit HttpJsonUpdateTraits_c ( ResultSetFormat_e eFormat )
		: m_eFormat ( eFormat )
	{}

	// Execute an UPDATE statement and encode the outcome into tResult.
	// Returns false (with sError set) when the statement failed.
	bool ProcessUpdate ( Str_t sRawRequest, const SqlStmt_t & tStmt, DocID_t tDocId, JsonObj_c & tResult, CSphString & sError )
	{
		HttpErrorReporter_c tReporter;
		sphHandleMysqlUpdate ( tReporter, tStmt, sRawRequest );

		if ( tReporter.IsError() )
		{
			sError = tReporter.GetError();
			tResult = sphEncodeInsertErrorJson ( tStmt.m_sIndex.cstr(), sError.cstr(), m_eFormat );
		} else
		{
			tResult = sphEncodeUpdateResultJson ( tStmt.m_sIndex.cstr(), tDocId, tReporter.GetAffectedRows(), m_eFormat );
		}

		// recorded even on error (0 rows affected in that case)
		m_iLastUpdated = tReporter.GetAffectedRows();

		return !tReporter.IsError();
	}

	int GetLastUpdated() const
	{
		return m_iLastUpdated;
	}

	const ResultSetFormat_e m_eFormat = ResultSetFormat_e::MntSearch; // reply dialect (Manticore or ES)
};
// Handler for the JSON /update endpoint. Also serves as base for /delete:
// derived classes override ParseQuery/ProcessQuery to change the operation.
class HttpHandler_JsonUpdate_c : public HttpHandler_c, HttpJsonUpdateTraits_c
{
protected:
	Str_t m_sQuery; // raw JSON request body

public:
	explicit HttpHandler_JsonUpdate_c ( Str_t sQuery )
		: m_sQuery ( sQuery )
	{}

	bool Process () final
	{
		TRACE_CONN ( "conn", "HttpHandler_JsonUpdate_c::Process" );
		SqlStmt_t tStmt;
		tStmt.m_bJson = true;
		tStmt.m_tQuery.m_eQueryType = QUERY_JSON;
		tStmt.m_sEndpoint = HttpEndpointToStr ( EHTTP_ENDPOINT::JSON_UPDATE );

		DocID_t tDocId = 0;
		if ( !ParseQuery ( tStmt, tDocId ) )
		{
			ReportError ( nullptr, HttpErrorType_e::Parse, EHTTP_STATUS::_400, tStmt.m_sIndex.cstr() );
			return false;
		}

		JsonObj_c tResult = JsonNull;
		bool bResult = ProcessQuery ( tStmt, tDocId, tResult );

		if ( bResult )
			BuildReply ( tResult.AsString(), EHTTP_STATUS::_200 ); // guarded by bResult, so the former '? _200 : _409' ternary was dead code
		else
			ReportError ( nullptr, HttpErrorType_e::ActionRequestValidation, EHTTP_STATUS::_409, tStmt.m_sIndex.cstr() );

		return bResult;
	}

protected:
	// parse the request body into a statement; overridden by the delete handler
	virtual bool ParseQuery ( SqlStmt_t & tStmt, DocID_t & tDocId )
	{
		return sphParseJsonUpdate ( m_sQuery, tStmt, tDocId, m_sError );
	}

	// execute the parsed statement; overridden by the delete handler
	virtual bool ProcessQuery ( const SqlStmt_t & tStmt, DocID_t tDocId, JsonObj_c & tResult )
	{
		return ProcessUpdate ( m_sQuery, tStmt, tDocId, tResult, m_sError );
	}
};
// Handler for the JSON /delete endpoint: reuses the update handler's Process()
// flow but swaps in delete parsing and execution.
class HttpHandler_JsonDelete_c final : public HttpHandler_JsonUpdate_c
{
public:
	explicit HttpHandler_JsonDelete_c ( Str_t sQuery )
		: HttpHandler_JsonUpdate_c ( sQuery )
	{}

protected:
	bool ParseQuery ( SqlStmt_t & tStmt, DocID_t & tDocId ) final
	{
		// override the endpoint tag set by the base class before parsing
		tStmt.m_sEndpoint = HttpEndpointToStr ( EHTTP_ENDPOINT::JSON_DELETE );
		return sphParseJsonDelete ( m_sQuery, tStmt, tDocId, m_sError );
	}

	bool ProcessQuery ( const SqlStmt_t & tStmt, DocID_t tDocId, JsonObj_c & tResult ) final
	{
		return ProcessDelete ( m_sQuery, tStmt, tDocId, tResult, m_sError, ResultSetFormat_e::MntSearch );
	}
};
// Stream of newline-delimited JSON: each ReadLine() returns one line (split on
// \r or \n). Lines crossing chunk boundaries are stitched together via
// m_dLastChunk. Note: the separator byte inside the input buffer is
// overwritten with '\0' in place, so the source buffer is mutated.
class NDJsonStream_c
{
	CharStream_c & m_tIn;          // underlying chunked source
	CSphVector<char> m_dLastChunk; // partial line carried over between chunks
	Str_t m_sCurChunk { dEmptyStr };// unconsumed remainder of the current chunk
	bool m_bDone;                  // set once the source is exhausted
	int m_iJsons = 0;              // lines emitted so far (for logging)

public:
	explicit NDJsonStream_c ( CharStream_c& tIn )
		: m_tIn { tIn }
		, m_bDone { m_tIn.Eof() }
	{}

	inline bool Eof() const { return m_bDone;}

	// Returns the next line, NUL-terminated in place; the final (unterminated)
	// tail of the stream is returned on the last call, which also sets Eof().
	Str_t ReadLine()
	{
		assert ( !m_bDone );

		while (true)
		{
			// refill from the source when the current chunk is fully consumed
			if ( IsEmpty ( m_sCurChunk ) )
			{
				if ( m_tIn.Eof() || m_tIn.GetError() )
					break;
				m_sCurChunk = m_tIn.Read();
			}

			const char* szLine = m_sCurChunk.first;
			const char* pEnd = szLine + m_sCurChunk.second;
			const char* p = szLine;
			while ( p<pEnd && *p!='\r' && *p!='\n' )
				++p;

			// no separator in this chunk: stash it and read more
			if ( p==pEnd )
			{
				m_dLastChunk.Append ( szLine, p-szLine );
				m_sCurChunk = dEmptyStr;
				continue;
			}

			// terminate the line in place and advance past the separator
			*( const_cast<char*> ( p ) ) = '\0';
			++p;
			m_sCurChunk = { p, pEnd - p };

			Str_t sResult;
			if ( m_dLastChunk.IsEmpty () )
			{
				// fast path: whole line lives inside the current chunk
				sResult = { szLine, p - szLine - 1 };
				// that is commented out, as we better will deal with empty strings on parser level instead.
				// if ( IsEmpty ( sResult ) )
				//	continue;
				++m_iJsons;
				HTTPINFO << "chunk " << m_iJsons << " '" << Data2Log ( sResult ) << "'";;
			} else
			{
				// slow path: join the carried-over prefix with this chunk's tail
				m_dLastChunk.Append ( szLine, p - szLine );
				sResult = m_dLastChunk;
				--sResult.second; // exclude terminating \0
				m_dLastChunk.Resize ( 0 );
				++m_iJsons;
				HTTPINFO << "chunk last " << m_iJsons << " '" << Data2Log ( sResult ) << "'";;
			}

			return sResult;
		}

		// source exhausted: emit whatever is left as the final line,
		// NUL-terminated but with the terminator excluded from the length
		m_bDone = true;
		m_dLastChunk.Add ( '\0' );
		m_dLastChunk.Resize ( m_dLastChunk.GetLength() - 1 );
		Str_t sResult = m_dLastChunk;
		++m_iJsons;
		HTTPINFO << "chunk termination " << m_iJsons << " '" << Data2Log ( sResult ) << "'";
		return sResult;
	}

	bool GetError() const { return m_tIn.GetError(); }
	const CSphString & GetErrorMessage() const { return m_tIn.GetErrorMessage(); }
};
// Return a view of tLine with all leading whitespace characters skipped
// (no copying; only the start pointer/length of the slice change).
static Str_t TrimHeadSpace ( Str_t tLine )
{
	const char * p = tLine.first;
	const char * pMax = p + tLine.second;
	// an empty slice falls straight through the loop unchanged
	while ( p<pMax && sphIsSpace ( *p ) )
		++p;
	return Str_t { p, pMax-p };
}
// Handler for the JSON /bulk endpoint: consumes an x-ndjson stream of
// insert/replace/update/delete statements. Consecutive statements against the
// same index are batched into a single transaction; an empty line or a switch
// to another index commits the current transaction. Processing stops at the
// first failed statement.
class HttpHandler_JsonBulk_c : public HttpHandler_c, public HttpJsonUpdateTraits_c, public HttpJsonTxnTraits_c
{
protected:
	NDJsonStream_c m_tSource;       // line-by-line view over the request body
	const OptionsHash_t& m_tOptions;

public:
	HttpHandler_JsonBulk_c ( CharStream_c& tSource, const OptionsHash_t & tOptions )
		: m_tSource ( tSource )
		, m_tOptions ( tOptions )
	{}

	bool Process ()
	{
		TRACE_CONN ( "conn", "HttpHandler_JsonBulk_c::Process" );
		if ( !CheckNDJson() )
			return false;

		JsonObj_c tResults ( true ); // array of per-statement/per-txn results
		bool bResult = false;
		int iCurLine = 0;          // lines consumed from the request
		int iLastTxStartLine = 0;  // first line of the currently uncommitted txn

		// builds the final reply from whatever has been collected so far
		auto FinishBulk = [&, this] ( EHTTP_STATUS eStatus = EHTTP_STATUS::_200 ) {
			JsonObj_c tRoot;
			tRoot.AddItem ( "items", tResults );
			tRoot.AddInt ( "current_line", iCurLine );
			tRoot.AddInt ( "skipped_lines", iCurLine - iLastTxStartLine );
			tRoot.AddBool ( "errors", !bResult );
			tRoot.AddStr ( "error", m_sError.IsEmpty() ? "" : m_sError );
			if ( eStatus == EHTTP_STATUS::_200 && !bResult )
				eStatus = EHTTP_STATUS::_500;
			BuildReply ( tRoot.AsString(), eStatus );
			HTTPINFO << "inserted " << iCurLine;
			return bResult;
		};

		// appends one {stmt: result} object to the items array
		auto AddResult = [&tResults] ( const char* szStmt, JsonObj_c& tResult ) {
			JsonObj_c tItem;
			tItem.AddItem ( szStmt, tResult );
			tResults.AddItem ( tItem );
		};

		if ( m_tSource.Eof() )
			return FinishBulk();

		// originally we execute txn for single index
		// if there is combo, we fall back to query-by-query commits
		CSphString sTxnIdx;
		CSphString sStmt;
		while ( !m_tSource.Eof() )
		{
			auto tQuery = m_tSource.ReadLine();
			tQuery = TrimHeadSpace ( tQuery ); // could be a line with only whitespace chars
			++iCurLine;

			DocID_t tDocId = 0;
			JsonObj_c tResult = JsonNull;
			if ( IsEmpty ( tQuery ) )
			{
				// an empty line acts as an explicit txn boundary
				if ( session::IsInTrans() )
				{
					assert ( !sTxnIdx.IsEmpty() );
					// empty query finishes current txn
					bResult = ProcessCommitRollback ( FromStr ( sTxnIdx ), tDocId, tResult, m_sError );
					AddResult ( "bulk", tResult );
					if ( !bResult )
						break;
					sTxnIdx = "";
					iLastTxStartLine = iCurLine;
				}
				continue;
			}

			bResult = false;
			// record the current line for crash reporting
			auto& tCrashQuery = GlobalCrashQueryGetRef();
			tCrashQuery.m_dQuery = { (const BYTE*) tQuery.first, tQuery.second };

			const char* szStmt = tQuery.first;
			SqlStmt_t tStmt;
			tStmt.m_bJson = true;
			CSphString sQuery;
			if ( !sphParseJsonStatement ( szStmt, tStmt, sStmt, sQuery, tDocId, m_sError ) )
			{
				HTTPINFO << "inserted " << iCurLine << ", error: " << m_sError;
				return FinishBulk ( EHTTP_STATUS::_400 );
			}

			if ( sTxnIdx.IsEmpty() )
			{
				// first statement: open a txn on its index
				sTxnIdx = tStmt.m_sIndex;
				ProcessBegin ( sTxnIdx );
			}
			else if ( session::IsInTrans() && sTxnIdx!=tStmt.m_sIndex )
			{
				assert ( !sTxnIdx.IsEmpty() );
				// we should finish current txn, as we got another index
				bResult = ProcessCommitRollback ( FromStr ( sTxnIdx ), tDocId, tResult, m_sError );
				AddResult ( "bulk", tResult );
				if ( !bResult )
					break;
				sTxnIdx = tStmt.m_sIndex;
				ProcessBegin ( sTxnIdx );
				iLastTxStartLine = iCurLine;
			}

			// dispatch the statement, accumulating txn counters on success
			switch ( tStmt.m_eStmt )
			{
			case STMT_INSERT:
			case STMT_REPLACE:
				bResult = ProcessInsert ( tStmt, tDocId, tResult, m_sError, ResultSetFormat_e::MntSearch );
				if ( bResult )
					++m_iInserts;
				break;

			case STMT_UPDATE:
				tStmt.m_sEndpoint = HttpEndpointToStr ( EHTTP_ENDPOINT::JSON_UPDATE );
				bResult = ProcessUpdate ( FromStr ( sQuery ), tStmt, tDocId, tResult, m_sError );
				if ( bResult )
					m_iUpdates += GetLastUpdated();
				break;

			case STMT_DELETE:
				tStmt.m_sEndpoint = HttpEndpointToStr ( EHTTP_ENDPOINT::JSON_DELETE );
				bResult = ProcessDelete ( FromStr ( sQuery ), tStmt, tDocId, tResult, m_sError, ResultSetFormat_e::MntSearch );
				break;

			default:
				HTTPINFO << "inserted " << iCurLine << ", got unknown statement:" << (int)tStmt.m_eStmt;
				return FinishBulk ( EHTTP_STATUS::_400 );
			}

			// inside a txn the per-statement result is deferred to the commit;
			// outside (non-mutable index) it is reported immediately
			if ( !bResult || !session::IsInTrans() )
				AddResult ( sStmt.cstr(), tResult );

			// no further than the first error
			if ( !bResult )
				break;

			if ( !session::IsInTrans() )
				iLastTxStartLine = iCurLine;
		}

		if ( bResult && session::IsInTrans() )
		{
			assert ( !sTxnIdx.IsEmpty() );
			// We're in txn - that is, nothing committed, and we should do it right now
			JsonObj_c tResult;
			bResult = ProcessCommitRollback ( FromStr ( sTxnIdx ), 0, tResult, m_sError );
			AddResult ( "bulk", tResult );
			if ( bResult )
				iLastTxStartLine = iCurLine;
		}

		session::SetInTrans ( false );

		HTTPINFO << "inserted " << iCurLine << " result: " << (int)bResult << ", error:" << m_sError;

		return FinishBulk();
	}

private:
	// the bulk endpoint only accepts Content-Type: application/x-ndjson
	bool CheckNDJson()
	{
		if ( !m_tOptions.Exists ( "content-type" ) )
		{
			ReportError ( "Content-Type must be set", HttpErrorType_e::Parse, EHTTP_STATUS::_400 );
			return false;
		}
		auto sContentType = m_tOptions["content-type"].ToLower();
		auto dParts = sphSplit ( sContentType.cstr(), ";" );
		if ( dParts.IsEmpty() || dParts[0] != "application/x-ndjson" )
		{
			ReportError ( "Content-Type must be application/x-ndjson", HttpErrorType_e::Parse, EHTTP_STATUS::_400 );
			return false;
		}

		return true;
	}
};
// Handler for the percolate (/pq) endpoint: searching stored queries against
// documents, plus CRUD on the stored queries themselves.
class HttpHandlerPQ_c final : public HttpHandler_c, public HttpOptionTrait_t
{
	Str_t m_sQuery; // raw JSON request body

public:
	HttpHandlerPQ_c ( Str_t sQuery, const OptionsHash_t & tOptions )
		: HttpOptionTrait_t ( tOptions )
		, m_sQuery ( sQuery )
	{}

	bool Process () final;

private:
	// FIXME!!! handle replication for InsertOrReplaceQuery and Delete
	// match documents against the stored queries of sIndex
	bool DoCallPQ ( const CSphString & sIndex, const JsonObj_c & tPercolate, bool bVerbose );
	// store (or overwrite) a percolate query; pUID receives/carries the query id
	bool InsertOrReplaceQuery ( const CSphString& sIndex, const JsonObj_c& tJsonQuery, const JsonObj_c& tRoot, CSphString* pUID, bool bReplace );
	// list the stored queries of sIndex
	bool ListQueries ( const CSphString & sIndex );
	// delete stored queries selected by the request body
	bool Delete ( const CSphString & sIndex, const JsonObj_c & tRoot );
};
// One parsed entry of an ES-style _bulk request.
struct BulkDoc_t
{
	CSphString m_sAction;   // action name from the meta line (semantics handled by the bulk processor)
	CSphString m_sIndex;    // target table name
	DocID_t m_tDocid { 0 }; // document id (0 when not supplied)
	Str_t m_tDocLine;       // raw source line carrying the document body
};
// Span of consecutive bulk docs grouped into one transaction:
// presumably [m_iFrom, m_iFrom+m_iCount) into the docs vector — confirm in ProcessTnx.
struct BulkTnx_t
{
	int m_iFrom { -1 }; // index of the first doc of the group (-1 = unset)
	int m_iCount { 0 }; // number of docs in the group
};
// Handler for the Elasticsearch-compatible _bulk endpoint; replies in the
// ES result dialect (ResultSetFormat_e::ES).
class HttpHandlerEsBulk_c : public HttpCompatBaseHandler_c, public HttpJsonUpdateTraits_c, public HttpJsonTxnTraits_c
{
public:
	HttpHandlerEsBulk_c ( Str_t sBody, int iReqType, const SmallStringHash_T<CSphString> & hOpts )
		: HttpCompatBaseHandler_c ( sBody, iReqType, hOpts )
		, HttpJsonUpdateTraits_c ( ResultSetFormat_e::ES )
		, HttpJsonTxnTraits_c ( ResultSetFormat_e::ES )
	{}

	bool Process () override;

private:
	// execute the grouped transactions over the parsed docs, collecting per-item results
	bool ProcessTnx ( const VecTraits_T<BulkTnx_t> & dTnx, VecTraits_T<BulkDoc_t> & dDocs, JsonObj_c & tItems );
	bool Validate();
	void ReportLogError ( const char * sError, HttpErrorType_e eType , EHTTP_STATUS eStatus, bool bLogOnly );
};
// Factory: pick the concrete handler for an endpoint, pre-reading and
// pre-parsing the request body where the endpoint needs it.
// sQuery receives the (possibly decoded) query text for later buddy processing.
// Returns nullptr on unknown endpoint or when reading the body failed.
static std::unique_ptr<HttpHandler_c> CreateHttpHandler ( EHTTP_ENDPOINT eEndpoint, CharStream_c & tSource, Str_t & sQuery, OptionsHash_t & tOptions, http_method eRequestType )
{
	const CSphString * pOption = nullptr;
	sQuery = dEmptyStr;

	// stores the query and mirrors it into the crash-report slot
	auto SetQuery = [&sQuery] ( Str_t&& sData ) {
		auto& tCrashQuery = GlobalCrashQueryGetRef();
		tCrashQuery.m_dQuery = { (const BYTE*)sData.first, sData.second };
		sQuery = sData;
	};

	// SPH_HTTP_ENDPOINT_SQL SPH_HTTP_ENDPOINT_CLI SPH_HTTP_ENDPOINT_CLI_JSON these endpoints url-encoded, all others are plain json, and we don't want to waste time pre-parsing them
	if ( eEndpoint== EHTTP_ENDPOINT::SQL || eEndpoint== EHTTP_ENDPOINT::CLI || eEndpoint== EHTTP_ENDPOINT::CLI_JSON )
	{
		auto sWholeData = tSource.ReadAll();
		if ( tSource.GetError() )
			return nullptr;
		if ( eEndpoint == EHTTP_ENDPOINT::SQL )
		{
			// bodies starting with a known form prefix are url-encoded form data;
			// anything else is treated as the bare query text
			const std::array<Str_t, 3> sQueries { FROMS ( "query=" ), FROMS ( "mode=raw&query=" ), FROMS ( "raw_response=true&query=" ) };
			if ( std::any_of ( sQueries.cbegin(), sQueries.cend(), [&sWholeData] ( const Str_t& S ) { return sWholeData.second >= S.second && 0 == memcmp ( sWholeData.first, S.first, S.second ); } ) )
			{
				DecodeAndStoreRawQuery ( tOptions, sWholeData );
				HttpRequestParser_c::ParseList ( sWholeData, tOptions );
			} else
			{
				StoreRawQuery ( tOptions, { sWholeData } );
				tOptions.Add ( sWholeData, "query" );
			}
		} else
			StoreRawQuery ( tOptions, { sWholeData } );
	}

	switch ( eEndpoint )
	{
	case EHTTP_ENDPOINT::SQL:
		{
			// raw mode returns the dataset as-is; otherwise the SQL SELECT-only
			// search handler with JSON result encoding is used
			bool bRawMode = false;
			pOption = tOptions ( "mode" );
			if ( pOption )
				bRawMode = *pOption == "raw";
			else
			{
				pOption = tOptions ( "raw_response" );
				if ( pOption )
					bRawMode = *pOption == "true";
			}

			if ( bRawMode )
			{
				auto pQuery = tOptions ( "query" );
				if ( pQuery )
					SetQuery ( FromStr ( *pQuery ) );
				return std::make_unique<HttpRawSqlHandler_c> ( sQuery, tOptions ); // non-json
			}
			else
			{
				pOption = tOptions ( "raw_query" );
				if ( pOption )
					SetQuery ( FromStr (*pOption) );
				return std::make_unique<HttpSearchHandler_SQL_c> ( tOptions ); // non-json
			}
		}
	case EHTTP_ENDPOINT::CLI:
	case EHTTP_ENDPOINT::CLI_JSON:
		{
			pOption = tOptions ( "raw_query" );
			auto tQuery = pOption ? FromStr ( *pOption ) : dEmptyStr;
			SetQuery ( std::move ( tQuery ) );
			return std::make_unique<HttpRawSqlHandler_c> ( sQuery, tOptions ); // non-json
		}

	case EHTTP_ENDPOINT::JSON_SEARCH:
		// NOTE(review): unlike the sibling cases below, tSource.GetError() is not
		// checked here — presumably the parser rejects a truncated body; confirm
		SetQuery ( tSource.ReadAll() );
		return std::make_unique<HttpHandler_JsonSearch_c> ( sQuery, tOptions ); // json

	case EHTTP_ENDPOINT::JSON_INDEX:
	case EHTTP_ENDPOINT::JSON_CREATE:
	case EHTTP_ENDPOINT::JSON_INSERT:
	case EHTTP_ENDPOINT::JSON_REPLACE:
		SetQuery ( tSource.ReadAll() );
		if ( tSource.GetError() )
			return nullptr;
		else
			return std::make_unique<HttpHandler_JsonInsert_c> ( sQuery, eEndpoint==EHTTP_ENDPOINT::JSON_INDEX || eEndpoint==EHTTP_ENDPOINT::JSON_REPLACE ); // json

	case EHTTP_ENDPOINT::JSON_UPDATE:
		SetQuery ( tSource.ReadAll() );
		if ( tSource.GetError() )
			return nullptr;
		else
			return std::make_unique<HttpHandler_JsonUpdate_c> ( sQuery ); // json

	case EHTTP_ENDPOINT::JSON_DELETE:
		SetQuery ( tSource.ReadAll() );
		if ( tSource.GetError() )
			return nullptr;
		else
			return std::make_unique<HttpHandler_JsonDelete_c> ( sQuery ); // json

	case EHTTP_ENDPOINT::JSON_BULK:
		// the bulk handler streams the body itself, no ReadAll here
		return std::make_unique<HttpHandler_JsonBulk_c> ( tSource, tOptions ); // json

	case EHTTP_ENDPOINT::PQ:
		SetQuery ( tSource.ReadAll() );
		if ( tSource.GetError() )
			return nullptr;
		else
			return std::make_unique<HttpHandlerPQ_c> ( sQuery, tOptions ); // json

	case EHTTP_ENDPOINT::ES_BULK:
		SetQuery ( tSource.ReadAll() );
		if ( tSource.GetError() )
			return nullptr;
		else
			return std::make_unique<HttpHandlerEsBulk_c> ( sQuery, eRequestType, tOptions );

	case EHTTP_ENDPOINT::TOTAL:
		// unknown concrete endpoint: try the ES-compatibility layer
		SetQuery ( tSource.ReadAll() );
		if ( tSource.GetError() )
			return nullptr;
		else
			return CreateCompatHandler ( sQuery, eRequestType, tOptions );

	default:
		break;
	}

	return nullptr;
}
// Top-level dispatch of one HTTP query: create the endpoint handler, run it,
// and collect reply bytes + status into dResult / the returned struct.
// CLI requests are returned unprocessed — they are handled by buddy afterwards.
HttpProcessResult_t ProcessHttpQuery ( CharStream_c & tSource, Str_t & sSrcQuery, OptionsHash_t & hOptions, CSphVector<BYTE> & dResult, bool bNeedHttpResponse, http_method eRequestType )
{
	TRACE_CONN ( "conn", "ProcessHttpQuery" );

	HttpProcessResult_t tRes;
	const CSphString & sEndpoint = hOptions["endpoint"];
	tRes.m_eEndpoint = StrToHttpEndpoint ( sEndpoint );
	std::unique_ptr<HttpHandler_c> pHandler = CreateHttpHandler ( tRes.m_eEndpoint, tSource, sSrcQuery, hOptions, eRequestType );
	if ( !pHandler )
	{
		// no handler: either serve the index page or reply with an error
		if ( tRes.m_eEndpoint == EHTTP_ENDPOINT::INDEX )
		{
			HttpHandlerIndexPage ( dResult );
		} else
		{
			DumpHttp ( eRequestType, sEndpoint, tSource.ReadAll() );
			tRes.m_eReplyHttpCode = EHTTP_STATUS::_501;
			if ( tSource.GetError() )
			{
				tRes.m_sError = tSource.GetErrorMessage();
				// oversized body maps to 413 Payload Too Large
				if ( tRes.m_sError.Begins ( "length out of bounds" ) )
					tRes.m_eReplyHttpCode = EHTTP_STATUS::_413;
			} else
			{
				tRes.m_sError.SetSprintf ( "/%s - unsupported endpoint", sEndpoint.cstr() );
			}
			sphHttpErrorReply ( dResult, tRes.m_eReplyHttpCode, tRes.m_sError.cstr() );
		}
		return tRes;
	}
	// will be processed by buddy right after source data got parsed
	if ( tRes.m_eEndpoint == EHTTP_ENDPOINT::CLI )
		return tRes;

	pHandler->SetErrorFormat ( bNeedHttpResponse );
	tRes.m_bOk = pHandler->Process();
	tRes.m_sError = pHandler->GetError();
	tRes.m_eReplyHttpCode = pHandler->GetStatusCode();
	dResult = std::move ( pHandler->GetResult() );

	return tRes;
}
// Convenience entry: run an HTTP query from an in-memory string (no socket,
// no HTTP response headers), including the buddy post-processing pass.
void sphProcessHttpQueryNoResponce ( const CSphString & sEndpoint, const CSphString & sQuery, CSphVector<BYTE> & dResult )
{
	OptionsHash_t hOptions;
	hOptions.Add ( sEndpoint, "endpoint" );

	BlobStream_c tQuery ( sQuery );
	Str_t sSrcQuery;
	HttpProcessResult_t tRes = ProcessHttpQuery ( tQuery, sSrcQuery, hOptions, dResult, false, HTTP_GET );
	ProcessHttpQueryBuddy ( tRes, sSrcQuery, hOptions, dResult, false, HTTP_GET );
}
// True when the request declares a gzip-compressed body
// (header "Content-Encoding: gzip").
static bool IsCompressed ( const OptionsHash_t & hOptions )
{
	const CSphString * pEncoding = hOptions ( "content-encoding" );
	return pEncoding && ( *pEncoding=="gzip" );
}
// Process one parsed client HTTP request: wrap the remaining socket data in
// the right body stream (chunked vs raw, optionally gzip), dispatch the query,
// and run buddy post-processing on the result.
bool HttpRequestParser_c::ProcessClientHttp ( AsyncNetInputBuffer_c& tIn, CSphVector<BYTE>& dResult )
{
	assert ( !m_szError );
	std::unique_ptr<CharStream_c> pSource;

	bool bCompressed = IsCompressed ( m_hOptions );

	if ( m_tParser.flags & F_CHUNKED )
	{
		pSource = std::make_unique<ChunkedSocketStream_c> ( &tIn, &m_tParser, m_bBodyDone, std::move ( m_dParsedBodies ), m_iLastParsed );
	} else
	{
		// for non-chunked - need to throw out beginning of the packet (with header). Only body rest in the buffer.
		tIn.PopTail ( m_iLastParsed - ParsedBodyLength() );
		int iFullLength = ParsedBodyLength() + ( (int)m_tParser.content_length > 0 ? (int)m_tParser.content_length : 0 );
		pSource = std::make_unique<RawSocketStream_c> ( &tIn, iFullLength, bCompressed );
	}

	EHTTP_ENDPOINT eEndpoint = StrToHttpEndpoint ( m_sEndpoint );
	// route unknown "*/_bulk" paths to the ES-compatible bulk handler
	if ( IsLogManagementEnabled() && eEndpoint==EHTTP_ENDPOINT::TOTAL && m_sEndpoint.Ends ( "_bulk" ) )
		eEndpoint = EHTTP_ENDPOINT::ES_BULK;

	HttpProcessResult_t tRes;
	Str_t sSrcQuery;

	if ( bCompressed && !HasGzip() )
	{
		// 14.11 Content-Encoding
		// If the content-coding of an entity in a request message is not acceptable to the origin server, the server SHOULD respond with a status code of 415 (Unsupported Media Type)
		tRes.m_eReplyHttpCode = EHTTP_STATUS::_415;
		tRes.m_bOk = false;
		tRes.m_sError = "gzip error: unpack is not supported, rebuild with zlib";
		sphHttpErrorReply ( dResult, tRes.m_eReplyHttpCode, tRes.m_sError.cstr() );
	} else if ( bCompressed && ( m_tParser.flags & F_CHUNKED ) )
	{
		// gzip over chunked transfer is not supported
		tRes.m_eReplyHttpCode = EHTTP_STATUS::_415;
		tRes.m_bOk = false;
		tRes.m_sError = "can not process chunked transfer-coding along with gzip";
		sphHttpErrorReply ( dResult, tRes.m_eReplyHttpCode, tRes.m_sError.cstr() );
	} else
	{
		tRes = ProcessHttpQuery ( *pSource, sSrcQuery, m_hOptions, dResult, true, m_eType );
	}

	return ProcessHttpQueryBuddy ( tRes, sSrcQuery, m_hOptions, dResult, true, m_eType );
}
// Build a minimal JSON error reply {"error":"..."} with the given HTTP status.
void sphHttpErrorReply ( CSphVector<BYTE> & dData, EHTTP_STATUS eCode, const char * szError )
{
	JsonObj_c tErr;
	tErr.AddStr ( "error", szError );
	const CSphString sBody = tErr.AsString();
	HttpBuildReply ( dData, eCode, FromStr ( sBody ), false );
}
// Serialize a percolate match result into an Elasticsearch-like JSON answer:
// {"took":..,"timed_out":false,"hits":{"total":..,"hits":[{..query..}, ...]}}.
// dDocids remaps internal row numbers back to user document ids (empty = identity).
static void EncodePercolateMatchResult ( const PercolateMatchResult_t & tRes, const CSphFixedVector<int64_t> & dDocids, const CSphString & sIndex, JsonEscapedBuilder & tOut )
{
	ScopedComma_c sRootBlock ( tOut, ",", "{", "}" );

	// column names
	tOut.Sprintf ( R"("took":%d,"timed_out":false)", ( int ) ( tRes.m_tmTotal / 1000 ));

	// hits {
	ScopedComma_c sHitsBlock ( tOut, ",", R"("hits":{)", "}");
	tOut.Sprintf ( R"("total":%d,"max_score":1)", tRes.m_dQueryDesc.GetLength()); // FIXME!!! track and provide weight
	if ( tRes.m_bVerbose )
		tOut.Sprintf ( R"("early_out_queries":%d,"matched_queries":%d,"matched_docs":%d,"only_terms_queries":%d,"total_queries":%d)",
			tRes.m_iEarlyOutQueries, tRes.m_iQueriesMatched, tRes.m_iDocsMatched, tRes.m_iOnlyTerms, tRes.m_iTotalQueries );

	// documents: one hit per matched stored query
	tOut.StartBlock ( ",", R"("hits":[)", "]");

	int iDocOff = 0;
	for ( const auto& tDesc : tRes.m_dQueryDesc )
	{
		ScopedComma_c sQueryComma ( tOut, ",","{"," }");
		tOut.Sprintf ( R"("table":"%s","_type":"doc","_id":"%U","_score":"1")", sIndex.cstr(), tDesc.m_iQUID );
		{
			// _source carries the stored query itself, either as JSON or as QL text
			ScopedComma_c sBrackets ( tOut, ",", R"("_source":{)", "}");
			if ( !tDesc.m_bQL )
			{
				tOut.Sprintf ( R"("query":%s)", tDesc.m_sQuery.cstr() );
			} else
			{
				ScopedComma_c sBrackets ( tOut, nullptr, R"("query": {"ql":)", "}");
				tOut.AppendEscapedWithComma ( tDesc.m_sQuery.cstr() );
			}
			if ( !tDesc.m_sTags.IsEmpty() )
				tOut.Sprintf ( R"("tags":"%s")", tDesc.m_sTags.cstr() );
		}

		// document count + document id(s)
		// m_dDocs layout per query: [count, row0, row1, ...]
		if ( tRes.m_bGetDocs )
		{
			ScopedComma_c sFields ( tOut, ",",R"("fields":{"_percolator_document_slot": [)", "] }");
			int iDocs = tRes.m_dDocs[iDocOff];
			for ( int iDoc = 1; iDoc<=iDocs; ++iDoc )
			{
				auto iRow = tRes.m_dDocs[iDocOff + iDoc];
				tOut.Sprintf ("%l", DocID_t ( dDocids.IsEmpty () ? iRow : dDocids[iRow] ) );
			}
			iDocOff += iDocs + 1;
		}
	}

	tOut.FinishBlock ( false ); // hits[]
	// all the rest blocks (root, hits) will be auto-closed here.
}
// Match one or more documents against the stored percolate queries of sIndex.
// Accepts either a single "document" object or a "documents" array; documents
// are converted to BSON before matching. Replies with the ES-like hits format.
bool HttpHandlerPQ_c::DoCallPQ ( const CSphString & sIndex, const JsonObj_c & tPercolate, bool bVerbose )
{
	CSphString sWarning, sTmp;
	BlobVec_t dDocs;

	// single document
	JsonObj_c tJsonDoc = tPercolate.GetObjItem ( "document", sTmp );
	if ( tJsonDoc )
	{
		auto & tDoc = dDocs.Add();
		if ( !bson::JsonObjToBson ( tJsonDoc, tDoc, g_bJsonAutoconvNumbers, g_bJsonKeynamesToLowercase ) )
		{
			ReportError ( "Bad cjson", EHTTP_STATUS::_400 );
			return false;
		}
	}

	// multiple documents
	JsonObj_c tJsonDocs = tPercolate.GetArrayItem ( "documents", m_sError, true );
	if ( !m_sError.IsEmpty() )
	{
		ReportError ( EHTTP_STATUS::_400 );
		return false;
	}

	for ( auto i : tJsonDocs )
	{
		auto & tDoc = dDocs.Add();
		if ( !bson::JsonObjToBson ( i, tDoc, g_bJsonAutoconvNumbers, g_bJsonKeynamesToLowercase ) )
		{
			ReportError ( "Bad cjson", EHTTP_STATUS::_400 );
			return false;
		}
	}

	if ( dDocs.IsEmpty() )
	{
		ReportError ( "no documents found", EHTTP_STATUS::_400 );
		return false;
	}

	PercolateOptions_t tOpts;
	tOpts.m_sIndex = sIndex;
	tOpts.m_bGetDocs = true;
	tOpts.m_bVerbose = bVerbose;
	tOpts.m_bGetQuery = true;
	// fixme! id alias here is 'id' or 'uid'. Process it!

	CSphSessionAccum tAcc;
	CPqResult tResult;
	tResult.m_dResult.m_bGetFilters = false;

	PercolateMatchDocuments ( dDocs, tOpts, tAcc, tResult );

	JsonEscapedBuilder sRes;
	EncodePercolateMatchResult ( tResult.m_dResult, tResult.m_dDocids, sIndex, sRes );
	BuildReply ( sRes, EHTTP_STATUS::_200 );

	return true;
}
// Emit the JSON acknowledgement for storing a PQ rule:
// "updated" (with forced_refresh) on replace, "created" on a fresh insert.
static void EncodePercolateQueryResult ( bool bReplace, const CSphString & sIndex, int64_t iID, StringBuilder_c & tOut )
{
	// pick the reply template first, then format it once
	const char * szTemplate = bReplace
		? R"({"table":"%s","type":"doc","_id":"%U","result":"updated","forced_refresh":true})"
		: R"({"table":"%s","type":"doc","_id":"%U","result":"created"})";
	tOut.Sprintf ( szTemplate, sIndex.cstr(), iID );
}
// Store (or replace) a percolate rule in table sIndex.
// tJsonQuery is either {"ql":"..."} (plain SphinxQL text) or a full-text JSON query;
// tRoot may additionally carry "tags" (string array) and "filters" (SphinxQL WHERE text).
// pUID is an optional rule id from the URL path (0 / absent = auto-generate).
// The write goes through the replication pipeline; on success the reply echoes
// the effective (possibly auto-generated) id.
bool HttpHandlerPQ_c::InsertOrReplaceQuery ( const CSphString & sIndex, const JsonObj_c & tJsonQuery, const JsonObj_c & tRoot, CSphString * pUID, bool bReplace )
{
	CSphString sTmp, sWarning;
	bool bQueryQL = true;
	CSphQuery tQuery;

	// the query text: either taken verbatim from "ql" ...
	const char * sQuery = nullptr;
	JsonObj_c tQueryQL = tJsonQuery.GetStrItem ( "ql", sTmp );
	if ( tQueryQL )
		sQuery = tQueryQL.SzVal();
	else
	{
		// ... or parsed out of a full-text JSON query
		bQueryQL = false;
		if ( !ParseJsonQueryFilters ( tJsonQuery, tQuery, m_sError, sWarning ) )
		{
			ReportError ( EHTTP_STATUS::_400 );
			return false;
		}

		if ( NonEmptyQuery ( tJsonQuery ) )
			sQuery = tQuery.m_sQuery.cstr();
	}

	if ( !sQuery || *sQuery=='\0' )
	{
		ReportError ( "no query found", EHTTP_STATUS::_400 );
		return false;
	}

	// explicit rule id from the URL, if any
	int64_t iID = 0;
	if ( pUID && !pUID->IsEmpty() )
		iID = strtoll ( pUID->cstr(), nullptr, 10 );

	// optional tags array, joined into a comma-separated list
	JsonObj_c tTagsArray = tRoot.GetArrayItem ( "tags", m_sError, true );
	if ( !m_sError.IsEmpty() )
	{
		ReportError ( EHTTP_STATUS::_400 );
		return false;
	}

	StringBuilder_c sTags (", ");
	for ( const auto & i : tTagsArray )
		sTags << i.SzVal();

	// optional textual "filters"; mutually exclusive with filters from the JSON query
	JsonObj_c tFilters = tRoot.GetStrItem ( "filters", m_sError, true );
	if ( !m_sError.IsEmpty() )
	{
		ReportError ( EHTTP_STATUS::_400 );
		return false;
	}

	if ( tFilters && !bQueryQL && tQuery.m_dFilters.GetLength() )
	{
		ReportError ( "invalid combination of SphinxQL and query filter provided", EHTTP_STATUS::_501 );
		return false;
	}

	// parse the textual filters against the index schema, or move over the ones
	// already parsed from the JSON query
	CSphVector<CSphFilterSettings> dFilters;
	CSphVector<FilterTreeItem_t> dFilterTree;
	if ( tFilters )
	{
		auto pServed = GetServed ( sIndex );
		if ( !CheckValid ( pServed, sIndex, IndexType_e::PERCOLATE ) )
			return false;
		RIdx_T<const PercolateIndex_i*> pIndex { pServed };

		if ( !PercolateParseFilters ( tFilters.SzVal(), SPH_COLLATION_UTF8_GENERAL_CI, pIndex->GetInternalSchema (), dFilters, dFilterTree, m_sError ) )
		{
			ReportError ( EHTTP_STATUS::_400 );
			return false;
		}
	} else
	{
		dFilters.SwapData ( tQuery.m_dFilters );
		dFilterTree.SwapData ( tQuery.m_dFilterTree );
	}

	// scope for index lock
	bool bOk = false;
	{
		auto pServed = GetServed ( sIndex );
		if ( !CheckValid ( pServed, sIndex, IndexType_e::PERCOLATE ))
			return false;
		RIdx_T<PercolateIndex_i*> pIndex { pServed };

		PercolateQueryArgs_t tArgs ( dFilters, dFilterTree );
		tArgs.m_sQuery = sQuery;
		tArgs.m_sTags = sTags.cstr();
		tArgs.m_iQUID = iID;
		tArgs.m_bReplace = bReplace;
		tArgs.m_bQL = bQueryQL;

		// add query
		auto pStored = pIndex->CreateQuery ( tArgs, m_sError );

		if ( pStored )
		{
			// hand the stored rule over to the replication pipeline
			auto* pSession = session::GetClientSession();
			auto& tAcc = pSession->m_tAcc;
			auto* pAccum = tAcc.GetAcc( pIndex, m_sError );

			ReplicationCommand_t * pCmd = pAccum->AddCommand ( ReplCmd_e::PQUERY_ADD, sIndex );
			// refresh query's UID for reply as it might be auto-generated
			iID = pStored->m_iQUID;
			pCmd->m_pStored = std::move ( pStored );

			bOk = HandleCmdReplicate ( *pAccum );
			TlsMsg::MoveError ( m_sError );
		}
	}

	if ( !bOk )
	{
		ReportError ( EHTTP_STATUS::_500 );
	} else
	{
		StringBuilder_c sRes;
		EncodePercolateQueryResult ( bReplace, sIndex, iID, sRes );
		BuildReply ( sRes, EHTTP_STATUS::_200 );
	}

	return bOk;
}
// For now listing of stored PQ rules is forcibly routed through the regular
// /json/search handler with a minimal {"table":"<idx>"} body.
// Later this is a matter of deprecate/delete.
bool HttpHandlerPQ_c::ListQueries ( const CSphString & sIndex )
{
	// synthesize the search request body
	StringBuilder_c sQuery;
	sQuery.Sprintf ( R"({"table":"%s"})", sIndex.scstr() );

	auto pSearchHandler = std::make_unique<HttpHandler_JsonSearch_c> ( (Str_t)sQuery, m_tOptions );
	if ( !pSearchHandler )
		return false;

	pSearchHandler->SetErrorFormat ( m_bNeedHttpResponse );
	pSearchHandler->Process();
	m_dData = std::move ( pSearchHandler->GetResult() );
	return true;
}
// Handle PQ delete-by-query: remove stored rules selected by the "tags" and/or
// "id" arrays in tRoot. At least one of the two must be present. The delete is
// replicated; the reply reports how many rules were removed.
bool HttpHandlerPQ_c::Delete ( const CSphString & sIndex, const JsonObj_c & tRoot )
{
	auto* pSession = session::GetClientSession();
	auto& tAcc = pSession->m_tAcc;
	auto* pAccum = tAcc.GetAcc ();

	ReplicationCommand_t * pCmd = pAccum->AddCommand ( ReplCmd_e::PQUERY_DELETE, sIndex );

	// optional tags array, joined into a comma-separated list
	JsonObj_c tTagsArray = tRoot.GetArrayItem ( "tags", m_sError, true );
	if ( !m_sError.IsEmpty() )
	{
		ReportError ( EHTTP_STATUS::_400 );
		return false;
	}

	StringBuilder_c sTags ( ", " );
	for ( const auto & i : tTagsArray )
		sTags << i.SzVal();

	// optional rule id array
	JsonObj_c tUidsArray = tRoot.GetArrayItem ( "id", m_sError, true );
	if ( !m_sError.IsEmpty() )
	{
		ReportError ( EHTTP_STATUS::_400 );
		return false;
	}

	for ( const auto & i : tUidsArray )
		pCmd->m_dDeleteQueries.Add ( i.IntVal() );

	// nothing to delete by is an error
	if ( !sTags.GetLength() && !pCmd->m_dDeleteQueries.GetLength() )
	{
		ReportError ( "no tags or id field arrays found", EHTTP_STATUS::_400 );
		return false;
	}

	pCmd->m_sDeleteTags = sTags.cstr();

	uint64_t tmStart = sphMicroTimer();

	int iDeleted = 0;
	bool bOk = HandleCmdReplicateDelete ( *pAccum, iDeleted );
	TlsMsg::MoveError ( m_sError );

	uint64_t tmTotal = sphMicroTimer() - tmStart;

	if ( !bOk )
	{
		FormatError ( EHTTP_STATUS::_400, "%s", m_sError.cstr() );
		return false;
	}

	// report elapsed time in milliseconds, ES-style
	StringBuilder_c tOut;
	tOut.Sprintf (R"({"took":%d,"timed_out":false,"deleted":%d,"total":%d,"failures":[]})",
		( int ) ( tmTotal / 1000 ), iDeleted, iDeleted );
	BuildReply ( tOut, EHTTP_STATUS::_200 );

	return true;
}
// Entry point for the /pq/ (and legacy /json/pq/) HTTP endpoint.
// Expected URL shape: pq/<index>/<operation>[/<id>], where the operation is one
// of "search" (CALL PQ), "doc" (insert/replace a rule) or "_delete_by_query".
// An empty body (or an empty JSON object) lists the stored rules instead.
bool HttpHandlerPQ_c::Process()
{
	TRACE_CONN ( "conn", "HttpHandlerPQ_c::Process" );
	CSphString * sEndpoint = m_tOptions ( "endpoint" );
	if ( !sEndpoint || sEndpoint->IsEmpty() )
	{
		FormatError ( EHTTP_STATUS::_400, "invalid empty endpoint, should be pq/index_name/operation");
		return false;
	}

	// strip the "pq/" (or legacy "json/pq/") prefix
	assert ( sEndpoint->Begins ( "json/pq/" ) || sEndpoint->Begins ( "pq/" ) );
	const char * sEndpointMethod = sEndpoint->cstr() + sizeof("pq/") - 1;
	if ( sEndpoint->Begins ( "json/pq/" ) )
		sEndpointMethod = sEndpoint->cstr() + sizeof("json/pq/") - 1;

	// split the remainder into index / operation / optional rule id
	StrVec_t dPoints;
	sphSplit ( dPoints, sEndpointMethod, "/" );
	if ( dPoints.GetLength()<2 )
	{
		FormatError ( EHTTP_STATUS::_400, "invalid endpoint '%s', should be pq/index_name/operation", sEndpoint->scstr() );
		return false;
	}

	const CSphString & sIndex = dPoints[0];
	const CSphString & sOp = dPoints[1];
	CSphString * pUID = nullptr;
	if ( dPoints.GetLength()>2 )
		pUID = dPoints.Begin() + 2;

	enum class PercolateOp_e
	{
		UNKNOWN,
		ADD,
		DEL,
		SEARCH
	} eOp = PercolateOp_e::UNKNOWN;

	if ( sOp=="_delete_by_query" )
		eOp = PercolateOp_e::DEL;
	else if ( sOp=="doc" )
		eOp = PercolateOp_e::ADD;
	else if ( sOp=="search" )
		eOp = PercolateOp_e::SEARCH;

	// no body at all: just list the stored rules
	if ( IsEmpty ( m_sQuery ) )
		return ListQueries ( sIndex );

	const JsonObj_c tRoot ( m_sQuery );
	if ( !tRoot )
	{
		ReportError ( "bad JSON object", EHTTP_STATUS::_400 );
		return false;
	}

	// empty JSON object: also a listing request
	if ( !tRoot.Size() )
		return ListQueries ( sIndex );

	if ( eOp==PercolateOp_e::UNKNOWN )
	{
		m_sError.SetSprintf ( "invalid percolate operation '%s', should be one of 'search' or 'doc' or '_delete_by_query'", sOp.cstr() );
		ReportError ( EHTTP_STATUS::_400 );
		return false;
	}

	// "query" is mandatory for search/doc, optional for delete
	JsonObj_c tQuery = tRoot.GetObjItem ( "query", m_sError, ( eOp==PercolateOp_e::DEL ) );
	if ( !tQuery && ( eOp!=PercolateOp_e::DEL ) )
	{
		ReportError ( EHTTP_STATUS::_400 );
		return false;
	}

	// search requests must carry a "percolate" object inside "query"
	JsonObj_c tPerc = ( ( eOp==PercolateOp_e::SEARCH ) ? tQuery.GetObjItem ( "percolate", m_sError ) : JsonNull );
	if ( ( eOp==PercolateOp_e::SEARCH ) && !tPerc )
	{
		ReportError ( EHTTP_STATUS::_400 );
		return false;
	}

	// "verbose" flag accepts bool or any numeric representation
	bool bVerbose = false;
	JsonObj_c tVerbose = tRoot.GetItem ( "verbose" );
	if ( tVerbose )
	{
		if ( tVerbose.IsDbl() )
			bVerbose = tVerbose.DblVal()!=0.0;
		else if ( tVerbose.IsInt() )
			bVerbose = tVerbose.IntVal()!=0;
		else if ( tVerbose.IsBool() )
			bVerbose = tVerbose.BoolVal();
	}

	if ( eOp==PercolateOp_e::SEARCH )
		return DoCallPQ ( sIndex, tPerc, bVerbose );
	else if ( eOp==PercolateOp_e::DEL )
		return Delete ( sIndex, tRoot );
	else
	{
		// insert/replace; the "refresh" URL option is forwarded as the replace flag
		bool bRefresh = false;
		CSphString * pRefresh = m_tOptions ( "refresh" );
		if ( pRefresh && !pRefresh->IsEmpty() )
		{
			if ( *pRefresh=="0" )
				bRefresh = false;
			else if ( *pRefresh=="1" )
				bRefresh = true;
		}
		return InsertOrReplaceQuery ( sIndex, tQuery, tRoot, pUID, bRefresh );
	}
}
static bool ParseMetaLine ( const char * sLine, BulkDoc_t & tDoc, CSphString & sError )
{
JsonObj_c tLineMeta ( sLine );
JsonObj_c tAction = tLineMeta[0];
if ( !tAction )
{
sError = "no statement found";
return false;
}
tDoc.m_sAction = tAction.Name();
if ( !tAction.IsObj() )
{
sError.SetSprintf ( "statement %s should be an object", tDoc.m_sAction.cstr() );
return false;
}
JsonObj_c tIndex = tAction.GetStrItem ( "_index", sError );
if ( !tIndex )
return false;
tDoc.m_sIndex = tIndex.StrVal();
JsonObj_c tId = tAction.GetItem ( "_id" );
if ( tId )
{
if ( tId.IsNum() )
tDoc.m_tDocid = tId.IntVal();
else if ( tId.IsStr() )
tDoc.m_tDocid = GetDocID ( tId.SzVal() );
else if ( tId.IsNull() )
tDoc.m_tDocid = 0;
else
{
sError.SetSprintf ( "_id should be an int or string" );
return false;
}
}
return true;
}
// Reconcile the document id between the ES bulk metadata line (tDocId, 0 = not
// set) and an "id" column inside the document source. On exit tDocId holds the
// effective id; the insert schema/values get an "id" column appended when the
// id came only from the metadata. Fails when the two places disagree.
static bool AddDocid ( SqlStmt_t & tStmt, DocID_t & tDocId, CSphString & sError )
{
	int iDocidPos = tStmt.m_dInsertSchema.GetFirst ( [&] ( const CSphString & sName ) { return sName=="id"; } );
	if ( iDocidPos!=-1 )
	{
		// the document provides its own id column
		SqlInsert_t & tVal = tStmt.m_dInsertValues[iDocidPos];
		// check and convert to int
		if ( tVal.m_iType!=SqlInsert_t::CONST_INT )
		{
			tVal.SetValueInt ( GetDocID ( tVal.m_sVal.cstr() ), false );
			tVal.m_iType = SqlInsert_t::CONST_INT;
		}
		DocID_t tSrcDocid = (int64_t)tVal.GetValueUint();

		// can not set id at the same time via es meta and via document id property
		if ( tDocId && tDocId!=tSrcDocid )
		{
			sError = "id has already been specified";
			return false;
		}

		tDocId = tSrcDocid;
		return true;
	}

	// no id in the metadata, none in the document: leave it to auto-assignment
	if ( !tDocId )
		return true;

	// id came from the metadata only: append it to the insert statement
	tStmt.m_dInsertSchema.Add ( sphGetDocidName() );
	SqlInsert_t & tId = tStmt.m_dInsertValues.Add();
	tId.m_iType = SqlInsert_t::CONST_INT;
	tId.SetValueInt(tDocId);
	tStmt.m_iSchemaSz = tStmt.m_dInsertSchema.GetLength();

	return true;
}
// Turn one ES bulk source line into a SqlStmt_t, dispatched by the bulk action
// ("index", "create", "update", "delete"). tDocId carries the id extracted from
// the metadata line (0 = auto); for inserts it is reconciled with an in-document
// "id" column via AddDocid.
static bool ParseSourceLine ( const char * sLine, const CSphString & sAction, SqlStmt_t & tStmt, DocID_t & tDocId, CSphString & sError )
{
	// FIXME!!! update and delete ES compat endpoints
	if ( sAction=="index" )
	{
		// "index": insert with the replace flag set
		JsonObj_c tRoot ( sLine );
		if ( !ParseJsonInsertSource ( tRoot, tStmt, true, sError ) )
			return false;
		if ( !AddDocid ( tStmt, tDocId, sError ) )
			return false;

	} else if ( sAction=="create" )
	{
		// "create": plain insert, no replace
		JsonObj_c tRoot ( sLine );
		if ( !ParseJsonInsertSource ( tRoot, tStmt, false, sError ) )
			return false;
		if ( !AddDocid ( tStmt, tDocId, sError ) )
			return false;

	} else if ( sAction=="update" )
	{
		// inject the table name and doc id into the update JSON before parsing
		JsonObj_c tUpd ( FromSz ( sLine ) );
		tUpd.AddStr ( "table", tStmt.m_sIndex );
		tUpd.AddInt ( "id", tDocId );
		if ( !ParseJsonUpdate ( tUpd, tStmt, tDocId, sError ) )
			return false;

	} else if ( sAction=="delete" )
	{
		// build a DELETE ... WHERE id=<docid> statement directly
		tStmt.m_eStmt = STMT_DELETE;
		tStmt.m_tQuery.m_sSelect = "id";
		CSphFilterSettings & tFilter = tStmt.m_tQuery.m_dFilters.Add();
		tFilter.m_eType = SPH_FILTER_VALUES;
		tFilter.m_dValues.Add ( tDocId );
		tFilter.m_sAttrName = "id";
	}

	// _bulk could have cluster:index format
	SqlParser_SplitClusterIndex ( tStmt.m_sIndex, &tStmt.m_sCluster );

	return true;
}
// Advance past any leading whitespace and return the first non-space position.
char * SkipSpace ( char * p )
{
	for ( ; sphIsSpace ( *p ); ++p )
		;
	return p;
}
// Translate an endpoint enum back into its canonical URL path chunk
// (the first name from the global endpoints table).
static CSphString sphHttpEndpointToStr ( EHTTP_ENDPOINT eEndpoint )
{
	assert ( eEndpoint < EHTTP_ENDPOINT::TOTAL );
	return g_dEndpoints[(int)eEndpoint].m_szName1;
}
// Check whether the blob tVal ends with the zero-terminated sSuffix.
// Empty blobs and null suffixes never match.
bool Ends ( const Str_t tVal, const char * sSuffix )
{
	if ( !sSuffix || IsEmpty ( tVal ) )
		return false;

	const auto iSuffixLen = (int) strlen ( sSuffix );
	if ( iSuffixLen>tVal.second )
		return false;

	// compare against the tail of the blob
	const char * szTail = tVal.first + tVal.second - iSuffixLen;
	return strncmp ( szTail, sSuffix, iSuffixLen )==0;
}
// Report (unless bLogOnly) and log an ES bulk error together with the offending
// request. The request body may have been NUL-split in place by SplitNdJson, so
// the NULs are turned back into newlines first to log the body as one payload.
void HttpHandlerEsBulk_c::ReportLogError ( const char * sError, HttpErrorType_e eType, EHTTP_STATUS eStatus, bool bLogOnly )
{
	if ( !bLogOnly )
		ReportError ( sError, eType, eStatus );

	// undo the in-place '\0' splitting to get a printable body
	for ( char * sCur = (char *)GetBody().first; sCur<GetBody().first+GetBody().second; sCur++ )
	{
		if ( *sCur=='\0' )
			*sCur = '\n';
	}

	const CSphString * pUrl = GetOptions() ( "full_url" );
	HTTPINFO << sError << "\n" << ( pUrl ? pUrl->scstr() : "" ) << "\n" << GetBody().first;
}
// Pre-flight checks for an ES _bulk request: Content-Type must be json or
// ndjson, the body must be non-empty and newline-terminated. On failure the
// HTTP error is already formatted and logged when this returns false.
bool HttpHandlerEsBulk_c::Validate()
{
	CSphString sError;
	CSphString * pOptContentType = GetOptions() ( "content-type" );
	if ( !pOptContentType )
	{
		ReportLogError ( "Content-Type must be set", HttpErrorType_e::IllegalArgument, EHTTP_STATUS::_400, false );
		return false;
	}

	// HTTP field could have multiple values
	StrVec_t dOptContentType = sphSplit ( pOptContentType->cstr(), ",; " );
	if ( !dOptContentType.Contains ( "application/x-ndjson" ) && !dOptContentType.Contains ( "application/json" ) )
	{
		sError.SetSprintf ( "Content-Type header [%s] is not supported", pOptContentType->cstr() );
		ReportLogError ( sError.cstr(), HttpErrorType_e::IllegalArgument, EHTTP_STATUS::_400, false );
		return false;
	}

	if ( IsEmpty ( GetBody() ) )
	{
		ReportLogError ( "request body is required", HttpErrorType_e::Parse, EHTTP_STATUS::_400, false );
		return false;
	}

	// ES requires the bulk body to end with a newline
	if ( !Ends ( GetBody(), "\n" ) )
	{
		ReportLogError ( "The bulk request must be terminated by a newline [\n]", HttpErrorType_e::IllegalArgument, EHTTP_STATUS::_400, false );
		return false;
	}

	return true;
}
// Handle an ES-compatible _bulk request: split the NDJSON body into alternating
// metadata/source lines, group consecutive documents with the same index and
// action into transactions, execute them, and reply with a per-document "items"
// array (HTTP 409 when anything failed).
bool HttpHandlerEsBulk_c::Process()
{
	if ( !Validate() )
		return false;

	// keep the raw body available for crash reports
	auto & tCrashQuery = GlobalCrashQueryGetRef();
	tCrashQuery.m_dQuery = S2B ( GetBody() );

	// NB: SplitNdJson NUL-terminates the lines in place inside the body buffer
	CSphVector<Str_t> dLines;
	SplitNdJson ( GetBody(), [&] ( const char * sLine, int iLen ) { dLines.Add ( Str_t ( sLine, iLen ) ); } );

	// pair metadata lines with their source lines ("delete" carries no source line)
	CSphString sError;
	CSphVector<BulkDoc_t> dDocs;
	dDocs.Reserve ( dLines.GetLength() / 2 );
	bool bNextLineMeta = true;
	for ( const Str_t & tLine : dLines )
	{
		if ( !bNextLineMeta )
		{
			dDocs.Last().m_tDocLine = tLine;
			bNextLineMeta = true;
		} else
		{
			// skip empty lines if they are meta information
			if ( IsEmpty ( tLine ) )
				continue;

			// any bad meta result in general error
			BulkDoc_t & tDoc = dDocs.Add();
			if ( !ParseMetaLine ( tLine.first, tDoc, sError ) )
			{
				ReportLogError ( sError.cstr(), HttpErrorType_e::ActionRequestValidation, EHTTP_STATUS::_400, false );
				return false;
			}
			if ( tDoc.m_sAction=="delete" )
			{
				// delete has no separate source line; reuse the meta line
				tDoc.m_tDocLine = tLine;
				bNextLineMeta = true;
			} else
			{
				bNextLineMeta = false;
			}
		}
	}

	// chop the document list into transactions: runs with the same index + action
	CSphVector<BulkTnx_t> dTnx;
	const BulkDoc_t * pLastDoc = dDocs.Begin();
	for ( const BulkDoc_t * pCurDoc = pLastDoc + 1; pCurDoc<dDocs.End(); pCurDoc++ )
	{
		// chain the same statements to the same index but not the updates
		if ( pLastDoc->m_sIndex==pCurDoc->m_sIndex && pLastDoc->m_sAction==pCurDoc->m_sAction && pCurDoc->m_sAction!="update" )
			continue;

		BulkTnx_t & tTnx = dTnx.Add();
		tTnx.m_iFrom = pLastDoc - dDocs.Begin();
		tTnx.m_iCount = pCurDoc - pLastDoc;
		pLastDoc = pCurDoc;
	}
	// the tail transaction (pLastDoc is null only for an empty document list)
	if ( pLastDoc )
	{
		BulkTnx_t & tTnx = dTnx.Add();
		tTnx.m_iFrom = pLastDoc - dDocs.Begin();
		tTnx.m_iCount = dDocs.GetLength() - tTnx.m_iFrom;
	}

	JsonObj_c tItems ( true );
	bool bOk = ProcessTnx ( dTnx, dDocs, tItems );

	JsonObj_c tRoot;
	tRoot.AddItem ( "items", tItems );
	tRoot.AddBool ( "errors", !bOk );
	tRoot.AddInt ( "took", 1 ); // FIXME!!! add delta
	BuildReply ( tRoot.AsString(), ( bOk ? EHTTP_STATUS::_200 : EHTTP_STATUS::_409 ) );

	if ( !bOk )
		ReportLogError ( "failed to commit", HttpErrorType_e::Unknown, EHTTP_STATUS::_400, true );

	return bOk;
}
static void AddEsReply ( const BulkDoc_t & tDoc, JsonObj_c & tRoot )
{
const JsonObj_c tRefShards ( "{ \"total\": 1, \"successful\": 1, \"failed\": 0 }" );
char sBuf[70];
snprintf ( sBuf, sizeof(sBuf), UINT64_FMT, (uint64_t)tDoc.m_tDocid );
const char * sActionRes = "created";
if ( tDoc.m_sAction=="delete" )
sActionRes = "deleted";
else if ( tDoc.m_sAction=="update" )
sActionRes = "updated";
JsonObj_c tShard ( tRefShards.Clone() );
JsonObj_c tRes;
tRes.AddStr ( "_index", tDoc.m_sIndex.cstr() );
tRes.AddStr ( "_type", "doc" );
tRes.AddStr ( "_id", sBuf );
tRes.AddInt ( "_version", 1 );
tRes.AddStr ( "result", sActionRes );
tRes.AddItem ( "_shards", tShard );
tRes.AddInt ( "_seq_no", 0 );
tRes.AddInt ( "_primary_term", 1 );
tRes.AddInt ( "status", 201 );
JsonObj_c tAction;
tAction.AddItem ( tDoc.m_sAction.cstr(), tRes );
tRoot.AddItem ( tAction );
}
// Append (or, when iReply>=0, replace the entry at that position with) a
// per-document error entry in ES bulk reply format.
// sErrorType is the ES exception class name (see GetErrorTypeName).
static void AddEsError ( int iReply, const CSphString & sError, const char * sErrorType, const BulkDoc_t & tDoc, JsonObj_c & tRoot )
{
	char sBuf[70];
	snprintf ( sBuf, sizeof(sBuf), UINT64_FMT, (uint64_t)tDoc.m_tDocid );

	JsonObj_c tErrorObj;
	tErrorObj.AddStr ( "type", sErrorType );
	tErrorObj.AddStr ( "reason", sError.cstr() );

	JsonObj_c tRes;
	tRes.AddStr ( "_index", tDoc.m_sIndex.cstr() );
	tRes.AddStr ( "_type", "doc" );
	tRes.AddStr ( "_id", sBuf );
	tRes.AddInt ( "status", 400 );
	tRes.AddItem ( "error", tErrorObj );

	JsonObj_c tAction;
	tAction.AddItem ( tDoc.m_sAction.cstr(), tRes );
	// iReply==-1 appends a fresh entry, otherwise the item at iReply is replaced
	if ( iReply!=-1 )
		tRoot.ReplaceItem ( iReply, tAction );
	else
		tRoot.AddItem ( tAction );
}
// Execute the grouped bulk transactions. Each BulkTnx_t is a run of documents
// applied within one begin/commit against a single index. Per-document
// parse/apply errors are collected into dErrors and reported as individual
// "items" entries; a failed commit marks every document of its transaction as
// failed. Returns false when anything failed.
bool HttpHandlerEsBulk_c::ProcessTnx ( const VecTraits_T<BulkTnx_t> & dTnx, VecTraits_T<BulkDoc_t> & dDocs, JsonObj_c & tItems )
{
	bool bOk = true;
	CSphVector<std::pair<int, CSphString>> dErrors;
	for ( const BulkTnx_t & tTnx : dTnx )
	{
		const CSphString & sIdx = dDocs[tTnx.m_iFrom].m_sIndex;
		assert ( !sIdx.IsEmpty() );
		ProcessBegin ( sIdx );

		bool bUpdate = false;
		// fold errors from the previous transaction into the overall status, then reset
		bOk &= dErrors.IsEmpty();
		dErrors.Resize ( 0 );
		for ( int i = 0; i<tTnx.m_iCount; i++ )
		{
			int iDoc = tTnx.m_iFrom + i;
			BulkDoc_t & tDoc = dDocs[iDoc];
			if ( IsEmpty ( tDoc.m_tDocLine ) )
			{
				dErrors.Add ( { iDoc, "failed to parse, document is empty" } );
				continue;
			}

			SqlStmt_t tStmt;
			tStmt.m_tQuery.m_sIndexes = tDoc.m_sIndex;
			tStmt.m_sIndex = tDoc.m_sIndex;
			tStmt.m_sStmt = tDoc.m_tDocLine.first;

			bool bParsed = ParseSourceLine ( tDoc.m_tDocLine.first, tDoc.m_sAction, tStmt, tDoc.m_tDocid, m_sError );
			if ( !bParsed )
			{
				dErrors.Add ( { iDoc, m_sError } );
				continue;
			}

			// dispatch by the statement kind ParseSourceLine produced
			bool bAction = false;
			JsonObj_c tResult = JsonNull;
			switch ( tStmt.m_eStmt )
			{
			case STMT_INSERT:
			case STMT_REPLACE:
				bAction = ProcessInsert ( tStmt, tDoc.m_tDocid, tResult, m_sError, ResultSetFormat_e::ES );
				break;

			case STMT_UPDATE:
				tStmt.m_sEndpoint = sphHttpEndpointToStr ( EHTTP_ENDPOINT::JSON_UPDATE );
				bAction = ProcessUpdate ( tDoc.m_tDocLine, tStmt, tDoc.m_tDocid, tResult, m_sError );
				bUpdate = true;
				break;

			case STMT_DELETE:
				tStmt.m_sEndpoint = sphHttpEndpointToStr ( EHTTP_ENDPOINT::JSON_DELETE );
				bAction = ProcessDelete ( tDoc.m_tDocLine, tStmt, tDoc.m_tDocid, tResult, m_sError, ResultSetFormat_e::ES );
				break;

			default:
				sphWarning ( "unknown statement \"%s\":%s", tStmt.m_sStmt, tDoc.m_tDocLine.first );
				break; // ignore statement as ES does
			}

			if ( !bAction )
				dErrors.Add ( { iDoc, tResult.GetItem ( "error" ).GetItem ( "type" ).StrVal() } );
		}

		// FIXME!!! check commit of empty accum
		JsonObj_c tResult;
		bool bCommited = ProcessCommitRollback ( FromStr ( sIdx ), DocID_t(), tResult, m_sError );

		if ( bCommited )
		{
			if ( bUpdate && !GetLastUpdated() )
			{
				// an update that touched no rows reports "document missing", ES-style
				assert ( tTnx.m_iCount==1 );
				const BulkDoc_t & tUpdDoc = dDocs[tTnx.m_iFrom];
				CSphString sUpdError;
				sUpdError.SetSprintf ( "[_doc][" INT64_FMT "]: document missing", tUpdDoc.m_tDocid );
				AddEsError ( -1, sUpdError, "document_missing_exception", tUpdDoc, tItems );
			} else
			{
				for ( int i=0; i<tTnx.m_iCount; i++ )
					AddEsReply ( dDocs[tTnx.m_iFrom+i], tItems );
			}
		} else
		{
			// commit failed: every document of the transaction gets an error entry
			for ( int i=0; i<tTnx.m_iCount; i++ )
			{
				AddEsError ( -1, tResult.GetStrItem ( "error", m_sError, false ).StrVal(), "mapper_parsing_exception", dDocs[tTnx.m_iFrom+i], tItems );
			}
		}

		// per-document errors replace their entries in the items array (see AddEsError)
		for ( const auto & tErr : dErrors )
			AddEsError ( tErr.first, tErr.second, "mapper_parsing_exception", dDocs[tErr.first], tItems );
	}
	// account for errors of the last transaction
	bOk &= dErrors.IsEmpty();

	session::SetInTrans ( false );
	return bOk;
}
// Split an NDJSON body into lines, invoking fnAction for every line found.
// DESTRUCTIVE: each line terminator ('\r' or '\n') is overwritten with '\0' in
// place, so fnAction receives zero-terminated strings pointing into sBody.
// A trailing chunk without a terminator is ignored (the bulk body is required
// to end with a newline anyway, see HttpHandlerEsBulk_c::Validate).
void SplitNdJson ( Str_t sBody, SplitAction_fn && fnAction )
{
	const char * sBodyEnd = sBody.first + sBody.second;
	while ( sBody.first<sBodyEnd )
	{
		const char * sNext = sBody.first;
		// break on CR or LF
		while ( sNext<sBodyEnd && *sNext != '\r' && *sNext != '\n' )
			sNext++;

		if ( sNext==sBodyEnd )
			break;

		// zero-terminate the line right in the source buffer
		*(const_cast<char*>(sNext)) = '\0';
		fnAction ( sBody.first, sNext-sBody.first );
		sBody.first = sNext + 1;

		// skip new lines
		while ( sBody.first<sBodyEnd && *sBody.first == '\n' )
			sBody.first++;
	}
}
// Toggle HTTP logging flags from an "http_*" setting name ending in
// "_0"/"_1"/"_on"/"_off". Returns false when the name is not an http_* toggle.
bool HttpSetLogVerbosity ( const CSphString & sVal )
{
	if ( !sVal.Begins ( "http_" ) )
		return false;

	const bool bEnable = sVal.Ends ( "_1" ) || sVal.Ends ( "_on" );
	if ( sVal.Begins ( "http_bad_req" ) )
		g_bLogBadHttpReq = bEnable; // dedicated flag: log malformed requests
	else
		LOG_LEVEL_HTTP = bEnable; // generic HTTP log verbosity
	return true;
}
// Trace that an interim "100 Continue" reply was sent to the client.
void LogReplyStatus100()
{
	HTTPINFO << "100 Continue sent";
}
// Map an internal HTTP error category onto the exception type name that ES
// clients expect in the "error":{"type":...} field of a JSON reply.
// Returns nullptr for categories without an ES counterpart (callers must
// handle that case).
const char * GetErrorTypeName ( HttpErrorType_e eType )
{
	switch ( eType )
	{
	case HttpErrorType_e::Parse: return "parse_exception";
	case HttpErrorType_e::IllegalArgument: return "illegal_argument_exception";
	case HttpErrorType_e::ActionRequestValidation: return "action_request_validation_exception";
	case HttpErrorType_e::IndexNotFound: return "index_not_found_exception";
	case HttpErrorType_e::ContentParse: return "x_content_parse_exception";
	case HttpErrorType_e::VersionConflictEngine: return "version_conflict_engine_exception";
	case HttpErrorType_e::DocumentMissing: return "document_missing_exception";
	case HttpErrorType_e::ResourceAlreadyExists: return "resource_already_exists_exception";
	case HttpErrorType_e::AliasesNotFound: return "aliases_not_found_exception";
	default:
		return nullptr; // was "nullptr;;" — stray extra semicolon removed
	}
}
| 99,595
|
C++
|
.cpp
| 3,068
| 29.691656
| 215
| 0.68559
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,942
|
sphinxstemen.cpp
|
manticoresoftware_manticoresearch/src/sphinxstemen.cpp
|
//
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "sphinxdefs.h" // for UNALIGNED_RAM_ACCESS
#if defined(_MSC_VER) && !defined(__cplusplus)
#define inline
#endif
// #define SNOWBALL2011
// consonants that may appear doubled and get undoubled by the English stemmer
static unsigned char stem_en_doubles[] = "bdfgmnprt";

// 256-entry lookup: 1 for lowercase ASCII vowels (a, e, i, o, u, y), 0 otherwise;
// note that the masked consonant-'Y' is deliberately NOT a vowel here
static unsigned char vowel_map[] =
	"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0" // 0
	"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0" // 1
	"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0" // 2
	"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0" // 3
	"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0" // 4
	"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0" // 5

	//` a  b  c  d  e  f  g  h  i  j  k  l  m  n  o - NOLINT
	"\0\1\0\0\0\1\0\0\0\1\0\0\0\0\0\1" // 6

	//p  q  r  s  t  u  v  w  x  y  z - NOLINT
	"\0\0\0\0\0\1\0\0\0\1\0\0\0\0\0\0" // 7

	"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0" // 8
	"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0" // 9
	"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0" // a
	"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0" // b
	"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0" // c
	"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0" // d
	"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0" // e
	"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"; // f

// vowel test on the working buffer ("word" must be in scope at the call site)
#define is_vowel(idx) vowel_map[word[idx]]

// 1 if the letter belongs to the doubled-consonants set, 0 otherwise
static inline int stem_en_id ( unsigned char l )
{
	for ( const unsigned char * v = stem_en_doubles; ; v++ )
		if ( !*v || *v==l )
			return ( *v==l ) ? 1 : 0;
}

// 1 for a vowel or any of 'w', 'x', masked 'Y'; 0 otherwise
static inline int stem_en_ivwxy ( unsigned char l )
{
	switch ( l )
	{
		case 'w': case 'x': case 'Y':
			return 1;
		default:
			return vowel_map[l];
	}
}

// nothing to precompute for the English stemmer; kept for API symmetry
void stem_en_init ()
{
}

// fast exception-word matchers: EXCBASE compares the first four bytes packed
// into iword; EXCn additionally checks length n and the remaining tail bytes
#define EXCBASE(b) ( iword==( ( (int)b[3]<<24 ) + ( (int)b[2]<<16 ) + ( (int)b[1]<<8 ) + (int)b[0] ) )
#define EXC4(a,b) ( len==4 && EXCBASE(b) )
#define EXC5(a,b) ( len==5 && EXCBASE(b) )
#define EXC6(a,b) ( len==6 && EXCBASE(b) && a[4]==b[4] )
#define EXC7(a,b) ( len==7 && EXCBASE(b) && a[4]==b[4] && a[5]==b[5] )
#define EXC8(a,b) ( len==8 && EXCBASE(b) && a[4]==b[4] && a[5]==b[5] && a[6]==b[6] )
void stem_en ( unsigned char * word, int len )
{
int i, first_vowel, r1, r2, iword;
unsigned char has_Y = 0;
if ( len<=2 )
return;
#if UNALIGNED_RAM_ACCESS
iword = *(int*)word;
#else
iword = ( (int)word[3]<<24 ) + ( (int)word[2]<<16 ) + ( (int)word[1]<<8 ) + (int)word[0];
#endif
// check for 3-letter exceptions (currently just one, "sky") and shortcuts
if ( len==3 )
{
#define CHECK3(c1,c2,c3) if ( iword==( (c1<<0)+(c2<<8)+(c3<<16) ) ) return;
#ifdef SNOWBALL2011
#define CHECK3A CHECK3
#else
#define CHECK3A(c1,c2,c3) if ( iword==( (c1<<0)+(c2<<8)+(c3<<16) ) ) { word[2] = '\0'; return; }
#endif
CHECK3 ( 't', 'h', 'e' );
CHECK3 ( 'a', 'n', 'd' );
CHECK3 ( 'y', 'o', 'u' );
CHECK3A ( 'w', 'a', 's' );
CHECK3A ( 'h', 'i', 's' );
CHECK3 ( 'f', 'o', 'r' );
CHECK3 ( 'h', 'e', 'r' );
CHECK3 ( 's', 'h', 'e' );
CHECK3 ( 'b', 'u', 't' );
CHECK3 ( 'h', 'a', 'd' );
CHECK3 ( 's', 'k', 'y' );
}
// check for 4..8-letter exceptions
if ( len>=4 && len<=8 )
{
// check for 4-letter exceptions and shortcuts
if ( len==4 )
{
// shortcuts
if ( iword==0x74616874 ) return; // that
if ( iword==0x68746977 ) return; // with
if ( iword==0x64696173 ) return; // said
if ( iword==0x6d6f7266 ) return; // from
// exceptions
if ( iword==0x7377656e ) return; // news
if ( iword==0x65776f68 ) return; // howe
}
// all those exceptions only have a few valid endings; early check
switch ( word[len-1] )
{
case 'd':
if ( EXC7 ( word, "proceed" ) ) return;
if ( EXC6 ( word, "exceed" ) ) return;
if ( EXC7 ( word, "succeed" ) ) return;
break;
case 'g':
if ( EXC5 ( word, "dying" ) ) { word[1] = 'i'; word[2] = 'e'; word[3] = '\0'; return; }
if ( EXC5 ( word, "lying" ) ) { word[1] = 'i'; word[2] = 'e'; word[3] = '\0'; return; }
if ( EXC5 ( word, "tying" ) ) { word[1] = 'i'; word[2] = 'e'; word[3] = '\0'; return; }
if ( EXC6 ( word, "inning" ) ) return;
if ( EXC6 ( word, "outing" ) ) return;
if ( EXC7 ( word, "canning" ) ) return;
#ifdef SNOWBALL2011
if ( EXC7 ( word, "herring" ) ) return;
if ( EXC7 ( word, "earring" ) ) return;
#endif
break;
case 's':
if ( EXC5 ( word, "skies" ) ) { word[2] = 'y'; word[3] = '\0'; return; }
if ( EXC7 ( word, "innings" ) ) { word[6] = '\0'; return; }
if ( EXC7 ( word, "outings" ) ) { word[6] = '\0';return; }
if ( EXC8 ( word, "cannings" ) ) { word[7] = '\0';return; }
#ifdef SNOWBALL2011
if ( EXC4 ( word, "skis" ) ) { word[3] = '\0'; return; }
if ( EXC5 ( word, "atlas" ) ) return;
if ( EXC6 ( word, "cosmos" ) ) return;
if ( EXC4 ( word, "bias" ) ) return;
if ( EXC5 ( word, "andes" ) ) return;
if ( EXC8 ( word, "herrings" ) ) { word[7] = '\0'; return; }
if ( EXC8 ( word, "earrings" ) ) { word[7] = '\0'; return; }
if ( EXC8 ( word, "proceeds" ) ) { word[7] = '\0'; return; }
if ( EXC7 ( word, "exceeds" ) ) { word[6] = '\0'; return; }
if ( EXC8 ( word, "succeeds" ) ) { word[7] = '\0'; return; }
#endif
break;
case 'y':
if ( EXC4 ( word, "idly" ) ) { word[3] = '\0';return; }
if ( EXC6 ( word, "gently" ) ) { word[5] = '\0';return; }
if ( EXC4 ( word, "ugly" ) ) { word[3] = 'i'; word[4] = '\0'; return; }
if ( EXC5 ( word, "early" ) ) { word[4] = 'i'; word[5] = '\0'; return; }
if ( EXC4 ( word, "only" ) ) { word[3] = 'i'; word[4] = '\0'; return; }
if ( EXC6 ( word, "singly" ) ) { word[5] = '\0'; return; }
break;
}
}
// hide consonant-style y's
if ( word[0]=='y' )
word[0] = has_Y = 'Y';
for ( i=1; i<len; i++ )
if ( word[i]=='y' && is_vowel ( i-1 ) )
word[i] = has_Y = 'Y';
// mark regions
// R1 begins after first "vowel, consonant" sequence in the word
// R2 begins after second "vowel, consonant" sequence
if ( len>=5 && EXCBASE("gene") && word[4]=='r' )
{
r1 = 5; // gener-
first_vowel = 1;
}
#ifdef SNOWBALL2011
else if ( len>=6 && EXCBASE("comm") && word[4]=='u' && word[5]=='n' )
{
r1 = 6; // commun-
first_vowel = 1;
} else if ( len>=5 && EXCBASE("arse") && word[4]=='n' )
{
r1 = 5; // arsen-
first_vowel = 0;
}
#endif
else
{
for ( i=0; i<len && !is_vowel(i); i++ );
first_vowel = i;
for ( i=first_vowel; i<len-2; i++ )
if ( is_vowel(i) && !is_vowel(i+1) )
break;
r1 = i+2;
}
for ( i=r1; i<len-2; i++ )
if ( is_vowel(i) && !is_vowel(i+1) )
break;
r2 = i+2;
#define W(p,c) ( word[len-p]==c )
#define SUFF2(c2,c1) ( len>=2 && W(1,c1) && W(2,c2) )
#define SUFF3(c3,c2,c1) ( len>=3 && W(1,c1) && W(2,c2) && W(3,c3) )
#define SUFF4(c4,c3,c2,c1) ( len>=4 && W(1,c1) && W(2,c2) && W(3,c3) && W(4,c4) )
#define SUFF5(c5,c4,c3,c2,c1) ( len>=5 && W(1,c1) && W(2,c2) && W(3,c3) && W(4,c4) && W(5,c5) )
#define SUFF6(c6,c5,c4,c3,c2,c1) ( len>=6 && W(1,c1) && W(2,c2) && W(3,c3) && W(4,c4) && W(5,c5) && W(6,c6) )
#define SUFF7(c7,c6,c5,c4,c3,c2,c1) ( len>=7 && W(1,c1) && W(2,c2) && W(3,c3) && W(4,c4) && W(5,c5) && W(6,c6) && W(7,c7) )
#define SUFF3A(c3,c2) ( len>=3 && W(2,c2) && W(3,c3) )
#define SUFF4A(c4,c3,c2) ( len>=4 && W(2,c2) && W(3,c3) && W(4,c4) )
#define SUFF5A(c5,c4,c3,c2) ( len>=5 && W(2,c2) && W(3,c3) && W(4,c4) && W(5,c5) )
#define SUFF6A(c6,c5,c4,c3,c2) ( len>=6 && W(2,c2) && W(3,c3) && W(4,c4) && W(5,c5) && W(6,c6) )
#define SUFF7A(c7,c6,c5,c4,c3,c2) ( len>=6 && W(2,c2) && W(3,c3) && W(4,c4) && W(5,c5) && W(6,c6) && W(7,c7) )
///////////
// STEP 1A
///////////
#ifdef SNOWBALL2011
#define IED_ACTION { if ( len-->4 ) len--; }
#else
#define IED_ACTION { if ( len--!=4 ) len--; }
#endif
switch ( word[len-1] )
{
case 'd':
if ( word[len-3]=='i' && word[len-2]=='e' )
IED_ACTION
break;
case 's':
if ( SUFF4 ( 's', 's', 'e', 's' ) ) // faster that suff4a for some reason!
len -= 2;
else if ( word[len-3]=='i' && word[len-2]=='e' )
IED_ACTION
else if ( word[len-2]!='u' && word[len-2]!='s' )
{
#ifdef SNOWBALL2011
if ( first_vowel<=len-3 )
#endif
len--;
}
break;
}
///////////
// STEP 1B
///////////
i = 0;
switch ( word[len-1] )
{
case 'd':
if ( SUFF3A ( 'e', 'e' ) ) { if ( len-3>=r1 ) len--; break; }
if ( word[len-2]=='e' ) i = 2;
break;
case 'y':
if ( word[len-2]=='l' )
{
if ( SUFF5A ( 'e', 'e', 'd', 'l' ) ) { if ( len-5>=r1 ) len -= 3; break; }
if ( SUFF4A ( 'e', 'd', 'l' ) ) { i = 4; break; }
if ( SUFF5A ( 'i', 'n', 'g', 'l' ) ) { i = 5; break; }
}
break;
case 'g':
if ( SUFF3A ( 'i', 'n' ) ) i = 3;
break;
}
if ( i && first_vowel<len-i )
{
len -= i;
if ( SUFF2 ( 'a', 't' ) || SUFF2 ( 'b', 'l' ) || SUFF2 ( 'i', 'z' ) )
word[len++] = 'e';
else if ( len>=2 && word[len-1]==word[len-2] && stem_en_id ( word[len-1] ) )
len--;
else if ( ( len==2 && is_vowel(0) && !is_vowel(1) )
|| ( len==r1 && !is_vowel ( len-3 ) && is_vowel ( len-2 ) && !stem_en_ivwxy ( word[len-1] ) ) )
{
word[len++] = 'e';
}
}
///////////
// STEP 1C
///////////
if ( len>2
&& ( word[len-1]=='y' || word[len-1]=='Y' )
&& !is_vowel ( len-2 ) )
{
word[len-1] = 'i';
}
//////////
// STEP 2
//////////
if ( len-2>=r1 )
switch ( word[len-1] )
{
case 'i':
if ( len>=3 && ( W ( 2, 'c' ) || W ( 2, 'l' ) || W ( 2, 't' ) ) )
{
if ( SUFF4A ( 'e', 'n', 'c' ) ) { if ( len-4>=r1 ) word[len-1] = 'e'; break; }
if ( SUFF4A ( 'a', 'n', 'c' ) ) { if ( len-4>=r1 ) word[len-1] = 'e'; break; }
if ( SUFF4A ( 'a', 'b', 'l' ) ) { if ( len-4>=r1 ) word[len-1] = 'e'; break; }
if ( SUFF3A ( 'b', 'l' ) ) { if ( len-3>=r1 ) word[len-1] = 'e'; break; }
if ( SUFF5A ( 'e', 'n', 't', 'l' ) ) { if ( len-5>=r1 ) len -= 2; break; }
if ( SUFF5A ( 'a', 'l', 'i', 't' ) ) { if ( len-5>=r1 ) len -= 3; break; }
if ( SUFF5A ( 'o', 'u', 's', 'l' ) ) { if ( len-5>=r1 ) len -= 2; break; }
if ( SUFF5A ( 'i', 'v', 'i', 't' ) ) { if ( len-5>=r1 ) { word[len-3] = 'e'; len -= 2; } break; }
if ( SUFF6A ( 'b', 'i', 'l', 'i', 't' ) ) { if ( len-6>=r1 ) { word[len-5] = 'l'; word[len-4] = 'e'; len -= 3; } break; }
if ( SUFF5A ( 'f', 'u', 'l', 'l' ) ) { if ( len-5>=r1 ) len -= 2; break; }
if ( SUFF6A ( 'l', 'e', 's', 's', 'l' ) ) { if ( len-6>=r1 ) len -= 2; break; }
}
#ifdef SNOWBALL2011
if ( len-3>=r1 && SUFF3A ( 'o', 'g' ) && word[len-4]=='l' ) { len -= 1; break; }
#else
if ( len-3>=r1 && SUFF3A ( 'o', 'g' ) ) { len -= 1; break; }
#endif
if ( len-2>=r1 && word[len-2]=='l' )
len -= 2;
else
break;
if ( len-2>=r1 && SUFF2 ( 'a', 'l' ) )
{
len -= 2;
if ( len-5>=r1 && SUFF5 ( 'a', 't', 'i', 'o', 'n' ) )
{
len -= 3;
word[len++] = 'e';
break;
}
if ( SUFF4 ( 't', 'i', 'o', 'n' ) )
break;
len += 2;
} else
{
switch ( word[len-1] )
{
case 'b':
case 'c':
case 'd':
case 'e':
case 'g':
case 'h':
case 'k':
case 'm':
case 'n':
case 'r':
case 't':
break;
default:
len += 2;
break;
}
}
break;
case 'l':
if ( SUFF7A ( 'a', 't', 'i', 'o', 'n', 'a' ) ) { if ( len-7>=r1 ) { word[len-5] = 'e'; len -= 4; } break; }
if ( SUFF6A ( 't', 'i', 'o', 'n', 'a' ) ) { if ( len-6>=r1 ) len -= 2; break; }
break;
case 'm':
if ( SUFF5A ( 'a', 'l', 'i', 's' ) ) { if ( len-5>=r1 ) len -= 3; break; }
break;
case 'n':
if ( SUFF7A ( 'i', 'z', 'a', 't', 'i', 'o' ) ) { if ( len-7>=r1 ) { word[len-5] = 'e'; len -= 4; } break; }
if ( SUFF5A ( 'a', 't', 'i', 'o' ) ) { if ( len-5>=r1 ) { word[len-3] = 'e'; len -= 2; } break; }
break;
case 'r':
if ( SUFF4A ( 'i', 'z', 'e' ) ) { if ( len-4>=r1 ) len -= 1; break; }
if ( SUFF4A ( 'a', 't', 'o' ) ) { if ( len-4>=r1 ) { word[len-2] = 'e'; len -= 1; } break; }
break;
case 's':
if ( len-7>=r1 && (
SUFF7A ( 'f', 'u', 'l', 'n', 'e', 's' ) ||
SUFF7A ( 'o', 'u', 's', 'n', 'e', 's' ) ||
SUFF7A ( 'i', 'v', 'e', 'n', 'e', 's' ) ) )
{
len -= 4;
}
break;
}
//////////
// STEP 3
//////////
if ( len-3>=r1 )
switch ( word[len-1] )
{
case 'e':
if ( SUFF5A ( 'a', 'l', 'i', 'z' ) ) { if ( len-5>=r1 ) len -= 3; break; }
if ( SUFF5A ( 'i', 'c', 'a', 't' ) ) { if ( len-5>=r1 ) len -= 3; break; }
#ifdef SNOWBALL2011
if ( SUFF5A ( 'a', 't', 'i', 'v' ) ) { if ( len-5>=r2 ) len -= 5; break; }
#else
if ( SUFF5A ( 'a', 't', 'i', 'v' ) ) { if ( len-5>=r1 ) len -= 5; break; }
#endif
break;
case 'i':
if ( SUFF5A ( 'i', 'c', 'i', 't' ) ) { if ( len-5>=r1 ) len -= 3; break; }
break;
case 'l':
if ( SUFF4A ( 'i', 'c', 'a' ) ) { if ( len-4>=r1 ) len -= 2; break; }
if ( SUFF3A ( 'f', 'u' ) ) { len -= 3; break; }
break;
case 's':
if ( SUFF4A ( 'n', 'e', 's' ) ) { if ( len-4>=r1 ) len -= 4; break; }
break;
}
//////////
// STEP 4
//////////
if ( len-2>=r2 )
switch ( word[len-1] )
{
case 'c':
if ( word[len-2]=='i' ) len -= 2; // -ic
break;
case 'e':
if ( len-3>=r2 )
{
if ( SUFF4A ( 'a', 'n', 'c' ) ) { if ( len-4>=r2 ) len -= 4; break; }
if ( SUFF4A ( 'e', 'n', 'c' ) ) { if ( len-4>=r2 ) len -= 4; break; }
if ( SUFF4A ( 'a', 'b', 'l' ) ) { if ( len-4>=r2 ) len -= 4; break; }
if ( SUFF4A ( 'i', 'b', 'l' ) ) { if ( len-4>=r2 ) len -= 4; break; }
if ( SUFF3A ( 'a', 't' ) ) { len -= 3; break; }
if ( SUFF3A ( 'i', 'v' ) ) { len -= 3; break; }
if ( SUFF3A ( 'i', 'z' ) ) { len -= 3; break; }
}
break;
case 'i':
if ( SUFF3A ( 'i', 't' ) ) { if ( len-3>=r2 ) len -= 3; break; }
break;
case 'l':
if ( word[len-2]=='a' ) len -= 2; // -al
break;
case 'm':
if ( SUFF3A ( 'i', 's' ) ) { if ( len-3>=r2 ) len -= 3; break; }
break;
case 'n':
if ( len-3>=r2 && SUFF3 ( 'i', 'o', 'n' ) && ( word[len-4]=='t' || word[len-4]=='s' ) )
len -= 3;
break;
case 'r':
if ( word[len-2]=='e' ) len -= 2; // -er
break;
case 's':
if ( SUFF3A ( 'o', 'u' ) ) { if ( len-3>=r2 ) len -= 3; break; }
break;
case 't':
if ( word[len-2]=='n' )
{
if ( SUFF5A ( 'e', 'm', 'e', 'n' ) ) { if ( len-5>=r2 ) len -= 5; break; }
if ( SUFF4A ( 'm', 'e', 'n' ) ) { if ( len-4>=r2 ) len -= 4; break; }
if ( SUFF3A ( 'a', 'n' ) ) { if ( len-3>=r2 ) len -= 3; break; }
if ( SUFF3A ( 'e', 'n' ) ) { if ( len-3>=r2 ) len -= 3; break; }
}
break;
}
//////////
// STEP 5
//////////
#ifdef SNOWBALL2011
if ( len>r2 && word[len-1]=='l' && word[len-2]=='l' )
len--;
else
#endif
while ( word[len-1]=='e' )
{
if ( len>r2 )
{
len--;
break;
}
if ( len<=r1 )
break;
if ( len>3 && !is_vowel ( len-4 ) && is_vowel ( len-3 ) && !stem_en_ivwxy ( word[len-2] ) )
break;
if ( len==3 && is_vowel(0) && !is_vowel(1) )
break;
len--;
break;
}
#ifndef SNOWBALL2011
if ( len>r2 && word[len-1]=='l' && word[len-2]=='l' )
len--;
#endif
////////////
// FINALIZE
////////////
word[len] = 0;
if ( has_Y )
for ( i=0; i<len; i++ )
if ( word[i]=='Y' )
word[i] = 'y';
}
| 15,122
|
C++
|
.cpp
| 474
| 28.626582
| 125
| 0.460663
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
16,943
|
killlist.cpp
|
manticoresoftware_manticoresearch/src/killlist.cpp
|
//
// Copyright (c) 2018-2024, Manticore Software LTD (https://manticoresearch.com)
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "killlist.h"
#include "sphinxint.h"
//////////////////////////////////////////////////////////////////////////
// Whether at least one row has been marked dead since the map was (re)loaded.
bool DeadRowMap_c::HasDead() const
{
	return m_bHaveDead;
}
// Number of dead rows. Computed lazily on first request and cached in
// m_iNumDeads; a negative cached value means "not counted yet".
DWORD DeadRowMap_c::GetNumDeads () const
{
	if ( m_iNumDeads<0 )
		m_iNumDeads = HasDead () ? CountDeads () : 0;
	return (DWORD)m_iNumDeads;
}
// Mark one row as dead in the given bitmap (one bit per row, 32 rows per
// DWORD). Returns true only if this call actually flipped the bit, i.e. the
// row was alive before; returns false for INVALID_ROWID or an already-dead
// row. The bit is set atomically where the toolchain provides a builtin,
// otherwise under m_tLock.
bool DeadRowMap_c::Set ( RowID_t tRowID, DWORD * pData )
{
	if ( tRowID==INVALID_ROWID )
		return false;
	assert ( tRowID < m_uRows );
	DWORD * pDword = pData + (tRowID>>5);	// word holding this row's bit
	DWORD uMask = 1UL<<( tRowID&31 );		// bit within that word
#ifdef HAVE_SYNC_FETCH
	// gcc/clang legacy atomic builtin; returns the pre-update value
	DWORD uPrev = __sync_fetch_and_or ( pDword, uMask );
#elif _WIN32
	DWORD uPrev = _InterlockedOr ( (long*)pDword, (long)uMask );
#else
	// no atomic fetch-or available: fall back to a mutex
	ScopedMutex_t tLock ( m_tLock );
	DWORD uPrev = *pDword;
	*pDword |= uMask;
#endif
	bool bSet = !( uPrev & uMask );	// true if the bit was clear before this call
	m_bHaveDead |= bSet;
	if ( bSet && m_iNumDeads>=0 )	// keep the cached count in sync when it is known
		++m_iNumDeads;
	return bSet;
}
// total population count over the whole bitmap
static DWORD CountBits ( const VecTraits_T<DWORD>& dData )
{
	DWORD uTotal = 0;
	for ( int i = 0; i < dData.GetLength(); ++i )
		uTotal += sphBitCount ( dData[i] );
	return uTotal;
}
// Scan the bitmap and refresh m_bHaveDead. The cached dead count is only
// refreshed in the two cheap cases: no set bits at all (count is 0), or the
// first set bit lives in the very last DWORD (count is that word's popcount).
// Otherwise m_iNumDeads is left untouched (the caller pre-sets it to -1) and
// the exact count is computed lazily by GetNumDeads().
void DeadRowMap_c::CheckForDead ( const DWORD * pData, const DWORD * pDataEnd )
{
	m_bHaveDead = false;
	while ( pData<pDataEnd && !m_bHaveDead )
		m_bHaveDead |= !!*pData++;
	if ( !m_bHaveDead )
		m_iNumDeads = 0;
	else if ( pData==pDataEnd )
		m_iNumDeads = sphBitCount ( *( pData-1 ) );
}
//////////////////////////////////////////////////////////////////////////
// RAM-resident dead-row map for uRows rows; all rows start out alive.
DeadRowMap_Ram_c::DeadRowMap_Ram_c ( DWORD uRows )
{
	Reset ( uRows );
}
// mark a row dead in the in-memory bitmap; see DeadRowMap_c::Set for semantics
bool DeadRowMap_Ram_c::Set ( RowID_t tRowID )
{
	return DeadRowMap_c::Set ( tRowID, m_dData.Begin() );
}
// check whether a row is marked dead in the in-memory bitmap
bool DeadRowMap_Ram_c::IsSet ( RowID_t tRowID ) const
{
	return DeadRowMap_c::IsSet ( tRowID, m_dData.Begin() );
}
// bitmap size in bytes
int64_t DeadRowMap_Ram_c::GetLengthBytes() const
{
	return m_dData.GetLengthBytes64();
}
// RAM footprint; same as the raw bitmap size for the in-memory flavour
uint64_t DeadRowMap_Ram_c::GetCoreSize () const
{
	return m_dData.GetLengthBytes64 ();
}
// Re-initialize the map for uRows rows: everything alive, count known zero.
void DeadRowMap_Ram_c::Reset ( DWORD uRows )
{
	// forget any previous state first
	m_bHaveDead = false;
	m_iNumDeads = 0;
	m_uRows = uRows;
	// one bit per row, packed into 32-bit words, all cleared
	const DWORD uWords = ( uRows+31 ) / 32;
	m_dData.Reset ( uWords );
	m_dData.Fill ( 0 );
}
// Load the bitmap for uRows rows from the reader and refresh the dead state.
void DeadRowMap_Ram_c::Load ( DWORD uRows, CSphReader & tReader, CSphString & sError )
{
	m_uRows = uRows;
	m_dData.Reset ( (m_uRows+31)/32 );
	if ( uRows & 0x1F )
		m_dData[m_dData.GetLength()-1] = 0; // ensure tail bits after the end are zeroed
	// NOTE(review): GetBytes() below overwrites the whole array, including the
	// word pre-zeroed above; that pre-zeroing only helps if the on-disk tail
	// bits are already clean - confirm the intended ordering.
	tReader.GetBytes ( m_dData.Begin(), m_dData.GetLength()*sizeof(m_dData[0]) );
	m_iNumDeads = -1; // unknown; CheckForDead() may refine it, else counted lazily
	CheckForDead ( m_dData.Begin(), m_dData.Begin()+m_dData.GetLength() );
}
// serialize the raw bitmap words to the writer
void DeadRowMap_Ram_c::Save ( CSphWriter & tWriter ) const
{
	tWriter.PutBytes ( m_dData.Begin(), m_dData.GetLength()*sizeof(m_dData[0]) );
}
// full popcount over the in-memory bitmap
DWORD DeadRowMap_Ram_c::CountDeads () const
{
	return CountBits ( m_dData );
}
// Number of still-alive rows: total minus the (lazily counted) dead ones.
DWORD DeadRowMap_Ram_c::GetNumAlive() const
{
	const DWORD uDeads = GetNumDeads();
	return m_uRows - uDeads;
}
//////////////////////////////////////////////////////////////////////////
// Flush pending bitmap changes to disk on teardown; any error is discarded
// here since there is no caller left to report it to.
DeadRowMap_Disk_c::~DeadRowMap_Disk_c()
{
	CSphString sError;
	Flush ( true, sError );
}
// mark a row dead directly in the mmapped bitmap; see DeadRowMap_c::Set
bool DeadRowMap_Disk_c::Set ( RowID_t tRowID )
{
	return DeadRowMap_c::Set ( tRowID, m_tData.GetWritePtr() );
}
// sync the mmapped bitmap to disk; optionally waits for completion
bool DeadRowMap_Disk_c::Flush ( bool bWaitComplete, CSphString & sError ) const
{
	return m_tData.Flush ( bWaitComplete, sError );
}
// Map the on-disk bitmap file. m_bHaveDead is set optimistically here;
// Preread() computes the real value afterwards.
bool DeadRowMap_Disk_c::Prealloc ( DWORD uRows, const CSphString & sFilename, CSphString & sError )
{
	// we'll reset this flag after preread
	m_bHaveDead = true;
	m_uRows = uRows;
	return m_tData.Setup ( sFilename.cstr(), sError, true );
}
// Touch the mapping (counting set bits along the way) and settle the
// dead-rows state that Prealloc() left optimistic.
void DeadRowMap_Disk_c::Preread ( const char * sIndexName, const char * sFor, bool bMlock )
{
	m_iNumDeads = PrereadMappingCountingBits ( sIndexName, sFor, bMlock, false, m_tData );
	m_bHaveDead = m_iNumDeads>0;
}
// drop the file mapping
void DeadRowMap_Disk_c::Dealloc()
{
	m_tData.Reset();
}
// mapped bitmap size in bytes
int64_t DeadRowMap_Disk_c::GetLengthBytes() const
{
	return m_tData.GetLengthBytes64();
}
// resident (in-core) portion of the mapping
uint64_t DeadRowMap_Disk_c::GetCoreSize () const
{
	return m_tData.GetCoreSize();
}
// full popcount over the mmapped bitmap
DWORD DeadRowMap_Disk_c::CountDeads () const
{
	return CountBits ( m_tData );
}
//////////////////////////////////////////////////////////////////////////
bool WriteDeadRowMap ( const CSphString & sFilename, DWORD uTotalDocs, CSphString & sError )
{
// empty dead row map
CSphWriter tRowMapWriter;
if ( !tRowMapWriter.OpenFile ( sFilename, sError ) )
return false;
int nEntries = int(( uTotalDocs+31 ) / 32);
for ( int i=0; i < nEntries; ++i )
tRowMapWriter.PutDword(0);
tRowMapWriter.CloseFile();
if ( tRowMapWriter.IsError() )
{
sError.SetSprintf ( "error writing row map to %s", sFilename.cstr() );
return false;
}
return true;
}
| 4,951
|
C++
|
.cpp
| 171
| 27.099415
| 99
| 0.66582
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
16,944
|
datareader.cpp
|
manticoresoftware_manticoresearch/src/datareader.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "datareader.h"
#include "sphinxint.h"
#include "fileutils.h"
//////////////////////////////////////////////////////////////////////////
// map a reader kind to the profiler state charged for its I/O
inline static ESphQueryState StateByKind ( DataReaderFactory_c::Kind_e eKind )
{
	if ( eKind==DataReaderFactory_c::DOCS )
		return SPH_QSTATE_READ_DOCS;
	if ( eKind==DataReaderFactory_c::HITS )
		return SPH_QSTATE_READ_HITS;
	return SPH_QSTATE_IO;
}
//////////////////////////////////////////////////////////////////////////
// Base for file-block readers: keeps the file name for diagnostics and
// expresses rowid/wordid decoding through the generic varint readers.
class FileBlockReader_c : public FileBlockReader_i
{
public:
	explicit FileBlockReader_c ( const char * szFileName )
		: m_szFileName ( szFileName )
	{}

	RowID_t UnzipRowid() override { return UnzipInt (); }
	SphWordID_t UnzipWordid() override { return UnzipOffset (); }

protected:
	const char * m_szFileName = nullptr; // not owned; used for warning messages
};
//////////////////////////////////////////////////////////////////////////
// imitate CSphReader but fully in memory (intended to be used with mmap)
class ThinMMapReader_c final : public FileBlockReader_c
{
public:
	// current offset from the start of the mapping
	SphOffset_t GetPos () const final
	{
		if ( !m_pPointer )
			return 0;
		assert ( m_pBase );
		return m_pPointer - m_pBase;
	}

	// reposition the cursor; the size hint is meaningless for mmap and ignored
	void SeekTo ( SphOffset_t iPos, int /*iSizeHint*/ ) final
	{
		m_pPointer = m_pBase + iPos;
	}

	void GetBytes ( BYTE * pData, int iSize ) final;
	int GetBytesZerocopy ( const BYTE *& pData, int iMax ) final;
	DWORD GetDword() final;
	SphOffset_t GetOffset() final;
	DWORD UnzipInt () final;
	uint64_t UnzipOffset () final;

	// rewind back to the start of the mapping
	void Reset () final
	{
		m_pPointer = m_pBase;
	}

protected:
	~ThinMMapReader_c() final {}

private:
	friend class MMapFactory_c; // the only one allowed to construct us
	const BYTE * m_pBase = nullptr;    // mapping start (not owned)
	const BYTE * m_pPointer = nullptr; // current read cursor
	SphOffset_t m_iSize = 0;           // mapping size, bytes

	ThinMMapReader_c ( const BYTE * pArena, SphOffset_t iSize, const char * sFileName )
		: FileBlockReader_c ( sFileName )
	{
		m_pPointer = m_pBase = pArena;
		m_iSize = iSize;
	}

	// bounds-checked single-byte read; on out-of-range logs a warning and
	// returns 0 rather than crashing
	BYTE GetByte() override
	{
		auto iPos = m_pPointer - m_pBase;
		if ( iPos>=0 && iPos<m_iSize )
			return *m_pPointer++;

		sphWarning( "INTERNAL: out-of-range in ThinMMapReader_c: trying to read '%s' at " INT64_FMT ", from mmap of "
			INT64_FMT ", query most probably would FAIL; report the fact to dev!",
			( m_szFileName ? m_szFileName : "" ), int64_t(iPos), int64_t(m_iSize) );
		return 0; // it's better then crash because of unexpected read out-of-range (file reader does the same there)
	}
};
// Bulk read with bounds check: copies iSize bytes and advances the cursor.
// On out-of-range nothing is copied, the cursor stays put, and only a
// warning is logged (mirrors GetByte's non-crashing policy).
void ThinMMapReader_c::GetBytes ( BYTE * pData, int iSize )
{
	auto iPos = m_pPointer - m_pBase;
	if ( iPos>=0 && iPos+iSize<=m_iSize )
	{
		memcpy ( pData, m_pPointer, iSize );
		m_pPointer += iSize;
		return;
	}
	sphWarning ( "INTERNAL: out-of-range in ThinMMapReader_c: trying to read %d bytes from '%s' at " INT64_FMT ", from mmap of " INT64_FMT ", query most probably would FAIL; report the fact to dev!",
		iSize, ( m_szFileName ? m_szFileName : "" ), int64_t(iPos), int64_t(m_iSize) );
}
// Zero-copy read: hands out a pointer into the mapping instead of copying.
// If fewer than iMax bytes remain, pData still points at the cursor but the
// returned length is 0 and the cursor does not move.
int ThinMMapReader_c::GetBytesZerocopy ( const BYTE *& pData, int iMax )
{
	pData = m_pPointer;
	if ( m_pPointer+iMax > m_pBase+m_iSize )
		return 0;

	m_pPointer += iMax;
	return iMax;
}
// read a raw 32-bit value (bounds-checked via GetBytes)
DWORD ThinMMapReader_c::GetDword()
{
	DWORD tRes;
	GetBytes ( (BYTE*)&tRes, sizeof(tRes) );
	return tRes;
}
// read a raw 64-bit offset (bounds-checked via GetBytes)
SphOffset_t ThinMMapReader_c::GetOffset()
{
	SphOffset_t tRes;
	GetBytes ( (BYTE*)&tRes, sizeof(tRes) );
	return tRes;
}
// decode a big-endian varint DWORD byte-by-byte from the mapping
DWORD ThinMMapReader_c::UnzipInt()
{
	return UnzipValueBE<DWORD> ( [this]() mutable { return GetByte(); } );
}
// decode a big-endian varint 64-bit value byte-by-byte from the mapping
uint64_t ThinMMapReader_c::UnzipOffset()
{
	return UnzipValueBE<uint64_t> ( [this]() mutable { return GetByte(); } );
}
//////////////////////////////////////////////////////////////////////////
// Reader that fulfils the FileBlockReader_i interface by forwarding every
// call to a buffered, seekable FD-based FileReader_c backend.
class DirectFileReader_c final : public FileBlockReader_c, protected FileReader_c
{
	friend class DirectFactory_c; // the only one allowed to construct us

public:
	void SeekTo ( SphOffset_t iPos, int iSizeHint ) final { FileReader_c::SeekTo ( iPos, iSizeHint ); }
	void GetBytes ( BYTE * pData, int iSize ) final { FileReader_c::GetBytes ( pData, iSize ); }
	int GetBytesZerocopy ( const BYTE *& pData, int iMax ) final { return FileReader_c::GetBytesZerocopy ( &pData, iMax ); }
	SphOffset_t GetPos () const final { return FileReader_c::GetPos(); }
	BYTE GetByte() final { return FileReader_c::GetByte(); }
	DWORD GetDword () final { return FileReader_c::GetDword(); }
	SphOffset_t GetOffset() final { return FileReader_c::GetOffset(); }
	DWORD UnzipInt() final { return FileReader_c::UnzipInt(); }
	uint64_t UnzipOffset() final { return FileReader_c::UnzipOffset(); }
	void Reset() final { FileReader_c::Reset(); }

protected:
	explicit DirectFileReader_c ( BYTE * pBuf, int iSize, const char * szFileName )
		: FileBlockReader_c ( szFileName )
		, FileReader_c ( pBuf, iSize )
	{}

	~DirectFileReader_c() final {}
};
//////////////////////////////////////////////////////////////////////////
// producer of readers which access by Seek + Read
// Factory producing seek+read (pread-style) readers; keeps one open FD and
// hands it out to every reader it makes. Reports no mapped/core size since
// nothing is memory-mapped.
class DirectFactory_c final : public DataReaderFactory_c
{
public:
	DirectFactory_c ( const CSphString & sFile, CSphString & sError, ESphQueryState eState, int iReadBuffer, int iReadUnhinted )
		: m_eWorkState ( eState )
		, m_iReadBuffer ( iReadBuffer )
		, m_iReadUnhinted ( iReadUnhinted )
	{
		SetValid ( m_dReader.Open ( sFile, sError ) );
	}

	uint64_t GetMappedsize () const final
	{
		return 0; // not mmapped
	}

	uint64_t GetCoresize () const final
	{
		return 0; // not mmapped
	}

	SphOffset_t GetFilesize () const final
	{
		return m_dReader.GetFilesize();
	}

	SphOffset_t GetPos () const final
	{
		return m_iPos;
	}

	void SeekTo ( SphOffset_t iPos ) final
	{
		m_iPos = iPos; // remembered; applied to the next reader made
	}

	// returns depended reader sharing same FD as maker
	FileBlockReader_c * MakeReader ( BYTE * pBuf, int iSize ) final
	{
		auto pFileReader = new DirectFileReader_c ( pBuf, iSize, m_dReader.GetFilename().cstr() );
		pFileReader->SetFile ( m_dReader.GetFD(), m_dReader.GetFilename().cstr() );
		pFileReader->SetBuffers ( m_iReadBuffer, m_iReadUnhinted );
		if ( m_iPos )
			pFileReader->SeekTo ( m_iPos, READ_NO_SIZE_HINT );
		pFileReader->m_pProfile = m_dReader.m_pProfile;
		pFileReader->m_eProfileState = m_eWorkState;
		return pFileReader;
	}

	void SetProfile ( QueryProfile_c * pProfile ) final
	{
		m_dReader.m_pProfile = pProfile;
	}

protected:
	~DirectFactory_c() final {} // d-tr only by Release

private:
	CSphAutoreader m_dReader;       // the shared FD owner
	ESphQueryState m_eWorkState;    // profiler state charged to readers
	SphOffset_t m_iPos = 0;         // position handed to new readers
	int m_iReadBuffer = 0;
	int m_iReadUnhinted = 0;
};
//////////////////////////////////////////////////////////////////////////
// producer of readers which access by MMap
// Factory producing readers over one shared memory mapping of the file;
// optionally mlock()s the mapping when MLOCK access is requested.
class MMapFactory_c final : public DataReaderFactory_c
{
public:
	MMapFactory_c ( const CSphString & sFile, CSphString & sError, FileAccess_e eAccess )
	{
		SetValid ( m_tBackendFile.Setup ( sFile, sError ) );
		if ( eAccess==FileAccess_e::MLOCK )
			m_tBackendFile.MemLock( sError );
	}

	uint64_t GetMappedsize () const final
	{
		return m_tBackendFile.GetLengthBytes();
	}

	uint64_t GetCoresize () const final
	{
		return m_tBackendFile.GetCoreSize();
	}

	SphOffset_t GetFilesize () const final
	{
		return m_tBackendFile.GetLength64 ();
	}

	SphOffset_t GetPos () const final
	{
		return m_iPos;
	}

	void SeekTo ( SphOffset_t iPos ) final
	{
		m_iPos = iPos; // remembered; applied to the next reader made
	}

	// returns depended reader sharing same mmap as maker
	FileBlockReader_c * MakeReader ( BYTE *, int ) final
	{
		auto pReader = new ThinMMapReader_c ( m_tBackendFile.GetReadPtr(),
			m_tBackendFile.GetLength64(), m_tBackendFile.GetFileName() );
		if ( m_iPos )
			pReader->SeekTo ( m_iPos, 0 );
		return pReader;
	}

protected:
	~MMapFactory_c() final {} // d-tr only by Release

private:
	CSphMappedBuffer<BYTE> m_tBackendFile; // the shared mapping
	SphOffset_t m_iPos = 0;                // position handed to new readers
};
//////////////////////////////////////////////////////////////////////////
// Build a reader factory for the given file: plain seek+read for FILE
// access, a shared mmap otherwise. Returns nullptr (with sError set by the
// factory) when the file cannot be opened/mapped.
DataReaderFactory_c * NewProxyReader ( const CSphString & sFile, CSphString & sError, DataReaderFactory_c::Kind_e eKind, int iReadBuffer, FileAccess_e eAccess )
{
	const auto eProfileState = StateByKind ( eKind );

	CSphRefcountedPtr<DataReaderFactory_c> pFactory;
	if ( eAccess==FileAccess_e::FILE )
		pFactory = new DirectFactory_c ( sFile, sError, eProfileState, iReadBuffer, GetUnhintedBuffer() );
	else
		pFactory = new MMapFactory_c ( sFile, sError, eAccess );

	// an invalid factory is released by the smart pointer on return
	return pFactory->IsValid() ? pFactory.Leak() : nullptr;
}
| 8,649
|
C++
|
.cpp
| 265
| 30.411321
| 196
| 0.67356
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,945
|
query_status.cpp
|
manticoresoftware_manticoresearch/src/query_status.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "networking_daemon.h"
#include "query_status.h"
#if _WIN32
// Win-specific headers and calls
#include <io.h>
#else
// UNIX-specific headers and calls
#include <sys/wait.h>
#include <netdb.h> // not need on Mac
#include <netinet/in.h>
#endif
// Blocking output buffer over a plain socket; used only by the legacy
// QueryStatus() path. Write timeout and sent-bytes accounting are stubbed.
class NetOutputBuffer_c final : public GenericOutputBuffer_c
{
public:
	explicit NetOutputBuffer_c ( int iSock );

	void SendBufferImpl ( const VecTraits_T<BYTE> & dData );
	bool SendBuffer ( const VecTraits_T<BYTE> & dData ) final
	{
		SendBufferImpl ( dData ); return true;
	}

	void SetWTimeoutUS ( int64_t iTimeoutUS ) final {}
	int64_t GetWTimeoutUS () const final { return 0ll; }
	int64_t GetTotalSent() const final { return 0ll; }

private:
	int m_iSock; ///< my socket
};
/////////////////////////////////////////////////////////////////////////////
// wrap an already-connected socket; the FD must be valid
NetOutputBuffer_c::NetOutputBuffer_c( int iSock )
	: m_iSock( iSock )
{
	assert ( m_iSock>0 );
}
// Push the whole buffer out through the socket, polling for writability
// between partial sends, with an overall deadline of g_iWriteTimeoutS.
// On failure or timeout sets m_bError and m_sError and gives up.
void NetOutputBuffer_c::SendBufferImpl ( const VecTraits_T<BYTE> & dData )
{
	if ( m_bError )
		return; // already broken; don't pile more errors on

	int64_t iLen = dData.GetLength64 ();
	if ( !iLen )
		return;

	if ( sphInterrupted () )
		sphLogDebug( "SIGTERM in NetOutputBuffer::Flush" );

	auto* pBuffer = ( const char* ) dData.Begin();

	CSphScopedProfile tProf ( m_pProfile, SPH_QSTATE_NET_WRITE );

	const int64_t tmMaxTimer = sphMicroTimer() + S2US * g_iWriteTimeoutS; // in microseconds
	while ( !m_bError )
	{
		auto iRes = sphSockSend ( m_iSock, pBuffer, iLen );
		if ( iRes<0 )
		{
			int iErrno = sphSockGetErrno();
			if ( iErrno==EINTR ) // interrupted before any data was sent; just loop
				continue;
			if ( iErrno!=EAGAIN && iErrno!=EWOULDBLOCK )
			{
				m_sError.SetSprintf ( "send() failed: %d: %s", iErrno, sphSockError( iErrno ));
				sphWarning( "%s", m_sError.cstr());
				m_bError = true;
				break;
			}
		} else
		{
			// partial (or full) send: advance and stop once everything is out
			pBuffer += iRes;
			iLen -= iRes;
			if ( iLen==0 )
				break;
		}

		// wait until we can write
		int64_t tmMicroLeft = tmMaxTimer - sphMicroTimer();
		iRes = 0;
		if ( tmMicroLeft>0 )
			iRes = sphPoll( m_iSock, tmMicroLeft, true );

		if ( !iRes ) // timeout
		{
			m_sError = "timed out while trying to flush network buffers";
			sphWarning( "%s", m_sError.cstr());
			m_bError = true;
			break;
		}

		if ( iRes<0 )
		{
			int iErrno = sphSockGetErrno();
			// NOTE(review): EINTR on poll breaks the loop with data possibly
			// unsent and m_bError still false - confirm that's intended
			if ( iErrno==EINTR )
				break;
			m_sError.SetSprintf ( "sphPoll() failed: %d: %s", iErrno, sphSockError( iErrno ));
			sphWarning( "%s", m_sError.cstr());
			m_bError = true;
			break;
		}
		assert ( iRes>0 );
	}
}
/// simple network request buffer
// todo! remove in favour of async buf
// the *only* usecase for now is legacy QueryStatus() in searchd.cpp
class NetInputBuffer_c : private LazyVector_T<BYTE>, public InputBuffer_c
{
	using STORE = LazyVector_T<BYTE>;

public:
	explicit NetInputBuffer_c ( int iSock );

	// pull iLen more bytes from the socket into the buffer (see definition)
	bool ReadFrom ( int iLen, int iTimeout, bool bIntr=false, bool bAppend=false );

	// whether the last ReadFrom() was cut short by an interrupt (EINTR/SIGTERM)
	bool IsIntr () const { return m_bIntr; }

	using InputBuffer_c::HasBytes;

private:
	static const int NET_MINIBUFFER_SIZE = STORE::iSTATICSIZE; // inline storage before heap kicks in
	int m_iSock;
	bool m_bIntr = false;
};
/////////////////////////////////////////////////////////////////////////////
// Receive whatever the socket has right now; on success advance the caller's
// write cursor and shrink the remaining-bytes counter. Returns the recv()
// result as-is (0 on EOF, negative on error).
static int RecvNBChunk( int iSock, char *& pBuf, int & iLeftBytes )
{
	const auto iGot = sphSockRecv ( iSock, pBuf, iLeftBytes );
	if ( iGot>0 )
	{
		pBuf += iGot;
		iLeftBytes -= iGot;
	}
	return ( int ) iGot;
}
// Read exactly iLen bytes from the socket within iReadTimeout seconds.
// Polls for readability between chunks; returns iLen on success or -1 on
// error/timeout/interrupt (errno-style detail via sphSockSetErrno). bIntr
// allows SIGTERM to abort the read, but only before the first chunk lands.
static int sphSockRead ( int iSock, void * buf, int iLen, int iReadTimeout, bool bIntr )
{
	assert ( iLen>0 );

	int64_t tmMaxTimer = sphMicroTimer() + I64C( 1000000 ) * Max( 1, iReadTimeout ); // in microseconds

	int iLeftBytes = iLen; // bytes to read left
	auto pBuf = ( char* ) buf;
	int iErr = 0;
	int iRes = -1;

	while ( iLeftBytes>0 )
	{
		int64_t tmMicroLeft = tmMaxTimer - sphMicroTimer();
		if ( tmMicroLeft<=0 )
			break; // timed out

#if _WIN32
		// Windows EINTR emulation
		// Ctrl-C will not interrupt select on Windows, so let's handle that manually
		// forcibly limit select() to 100 ms, and check flag afterwards
		if ( bIntr )
			tmMicroLeft = Min ( tmMicroLeft, 100000 );
#endif

		// wait until there is data
		iRes = sphPoll( iSock, tmMicroLeft );

		// if there was EINTR, retry
		// if any other error, bail
		if ( iRes==-1 )
		{
			// only let SIGTERM (of all them) to interrupt, and only if explicitly allowed
			iErr = sphSockGetErrno();
			if ( iErr==EINTR )
			{
				if ( !( sphInterrupted () && bIntr ))
					continue;
				sphLogDebug( "sphSockRead: select got SIGTERM, exit -1" );
			}
			return -1;
		}

		// if there was a timeout, report it as an error
		if ( iRes==0 )
		{
#if _WIN32
			// Windows EINTR emulation
			if ( bIntr )
			{
				// got that SIGTERM
				if ( sphInterrupted() )
				{
					sphLogDebug ( "sphSockRead: got SIGTERM emulation on Windows, exit -1" );
					sphSockSetErrno ( EINTR );
					return -1;
				}

				// timeout might not be fully over just yet, so re-loop
				continue;
			}
#endif
			sphSockSetErrno( ETIMEDOUT );
			return -1;
		}

		// try to receive next chunk
		iRes = RecvNBChunk( iSock, pBuf, iLeftBytes );

		// if there was eof, we're done
		if ( !iRes )
		{
			sphSockSetErrno( ECONNRESET );
			return -1;
		}

		// if there was EINTR, retry
		// if any other error, bail
		if ( iRes==-1 )
		{
			// only let SIGTERM (of all them) to interrupt, and only if explicitly allowed
			iErr = sphSockGetErrno();
			if ( iErr==EINTR )
			{
				if ( !( sphInterrupted () && bIntr ))
					continue;
				sphLogDebug( "sphSockRead: select got SIGTERM, exit -1" );
			}
			return -1;
		}

		// avoid partial buffer loss in case of signal during the 2nd (!) read
		bIntr = false;
	}

	// if there was a timeout, report it as an error
	if ( iLeftBytes!=0 )
	{
		sphSockSetErrno( ETIMEDOUT );
		return -1;
	}

	return iLen;
}
// start empty on the small inline buffer; actual payload arrives via ReadFrom()
NetInputBuffer_c::NetInputBuffer_c( int iSock )
	: STORE( NET_MINIBUFFER_SIZE ), InputBuffer_c( m_pData, NET_MINIBUFFER_SIZE ), m_iSock( iSock )
{
	Resize( 0 );
}
// Read iLen bytes from the socket into the buffer, either replacing the
// current content or (bAppend) growing it, with iTimeout seconds to finish.
// Returns false on oversized packet, bad socket, short read, or interrupt
// (m_bIntr distinguishes the interrupt case).
bool NetInputBuffer_c::ReadFrom( int iLen, int iTimeout, bool bIntr, bool bAppend )
{
	int iTail = bAppend ? m_iLen : 0;

	m_bIntr = false;
	if ( !IsLessMaxPacket ( iLen ) )
		return false;

	if ( m_iSock<0 )
	{
		SetError ( "reading from invalid socket %d", m_iSock );
		return false;
	}

	// the vector may reallocate: remember the cursor offset, then re-anchor
	// m_pBuf/m_pCur to the (possibly moved) storage
	int iOff = int ( m_pCur - m_pBuf );
	Resize( m_iLen );
	Reserve( iTail + iLen );
	BYTE* pBuf = m_pData + iTail;
	m_pBuf = m_pData;
	m_pCur = bAppend ? m_pData + iOff : m_pData;
	int iGot = sphSockRead( m_iSock, pBuf, iLen, iTimeout, bIntr );

	if ( sphInterrupted () )
	{
		SetError ( "NetInputBuffer_c::ReadFrom: got SIGTERM, return false" );
		sphLogDebugv ( "%s", GetErrorMessage().cstr() );
		m_bIntr = true;
		return false;
	}

	if ( iGot!=iLen )
		SetError ( "wrong size read %d(%d)", iGot, iLen );

	m_bIntr = ( GetError() && ( sphSockPeekErrno()==EINTR ) );
	m_iLen = ( GetError() ? 0 : iTail + iLen ); // on error the buffer is considered empty
	return !GetError();
}
// fixme! refactor to common flavour
// Connect to a running daemon over the SPHINX API protocol (unix socket or
// TCP), issue a STATUS command, and print the returned name/value rows to
// stdout. Tries each configured sphinx-proto listener in turn; fatals if
// none can be reached or the handshake fails. When v is null, defaults to
// 127.0.0.1 on the standard API port.
// fixme! refactor to common flavour
void QueryStatus ( CSphVariant * v ) REQUIRES ( MainThread )
{
	char sBuf [ SPH_ADDRESS_SIZE ];
	char sListen [ 256 ];
	CSphVariant tListen;

	if ( !v )
	{
		snprintf ( sListen, sizeof ( sListen ), "127.0.0.1:%d:sphinx", SPHINXAPI_PORT );
		tListen = CSphVariant ( sListen );
		v = &tListen;
	}

	// walk the listener list; only sphinx-proto entries qualify
	for ( ; v; v = v->m_pNext )
	{
		ListenerDesc_t tDesc = ParseListener ( v->cstr() );
		if ( tDesc.m_eProto!=Proto_e::SPHINX )
			continue;

		int iSock = -1;
#if !_WIN32
		if ( !tDesc.m_sUnix.IsEmpty() )
		{
			// UNIX connection
			struct sockaddr_un uaddr;

			size_t len = strlen ( tDesc.m_sUnix.cstr() );
			if ( len+1 > sizeof(uaddr.sun_path ) )
				sphFatal ( "UNIX socket path is too long (len=%d)", (int)len );

			memset ( &uaddr, 0, sizeof(uaddr) );
			uaddr.sun_family = AF_UNIX;
			memcpy ( uaddr.sun_path, tDesc.m_sUnix.cstr(), len+1 );

			iSock = socket ( AF_UNIX, SOCK_STREAM, 0 );
			if ( iSock<0 )
				sphFatal ( "failed to create UNIX socket: %s", sphSockError() );

			if ( connect ( iSock, (struct sockaddr*)&uaddr, sizeof(uaddr) )<0 )
			{
				sphWarning ( "failed to connect to unix://%s: %s\n", tDesc.m_sUnix.cstr(), sphSockError() );
				sphSockClose ( iSock );
				continue;
			}
		} else
#endif
		{
			// TCP connection
			struct sockaddr_in sin;
			memset ( &sin, 0, sizeof(sin) );
			sin.sin_family = AF_INET;
			// a wildcard listener is dialed back on loopback
			sin.sin_addr.s_addr = ( tDesc.m_uIP==htonl ( INADDR_ANY ) )
				? htonl ( INADDR_LOOPBACK )
				: tDesc.m_uIP;
			sin.sin_port = htons ( (short)tDesc.m_iPort );

			iSock = (int)socket ( AF_INET, SOCK_STREAM, 0 );
			if ( iSock<0 )
				sphFatal ( "failed to create TCP socket: %s", sphSockError() );

			sphSetSockNodelay ( iSock );

			if ( connect ( iSock, (struct sockaddr*)&sin, sizeof(sin) )<0 )
			{
				sphWarning ( "failed to connect to %s:%d: %s\n", sphFormatIP ( sBuf, sizeof(sBuf), tDesc.m_uIP ), tDesc.m_iPort, sphSockError() );
				sphSockClose ( iSock );
				continue;
			}
		}

		// send request
		NetOutputBuffer_c tOut ( iSock );
		tOut.SendDword ( SPHINX_CLIENT_VERSION );
		{
			auto tHdr = APIHeader ( tOut, SEARCHD_COMMAND_STATUS, VER_COMMAND_STATUS );
			tOut.SendInt ( 1 ); // dummy body
		}
		tOut.Flush ();

		// get reply
		NetInputBuffer_c tIn ( iSock );
		if ( !tIn.ReadFrom ( 12, 5 ) ) // magic_header_size=12, magic_timeout=5
			sphFatal ( "handshake failure (no response)" );

		DWORD uVer = tIn.GetDword();
		if ( uVer!=SPHINX_SEARCHD_PROTO && uVer!=0x01000000UL ) // workaround for all the revisions that sent it in host order...
			sphFatal ( "handshake failure (unexpected protocol version=%u)", uVer );

		if ( tIn.GetWord()!=SEARCHD_OK )
			sphFatal ( "status command failed" );

		if ( tIn.GetWord()!=VER_COMMAND_STATUS )
			sphFatal ( "status command version mismatch" );

		if ( !tIn.ReadFrom ( tIn.GetDword(), 5 ) ) // magic_timeout=5
			sphFatal ( "failed to read status reply" );

		fprintf ( stdout, "\nsearchd status\n--------------\n" );

		// reply is a rows*cols table of strings; first column is the key
		int iRows = tIn.GetDword();
		int iCols = tIn.GetDword();
		for ( int i=0; i<iRows && !tIn.GetError(); i++ )
		{
			for ( int j=0; j<iCols && !tIn.GetError(); j++ )
			{
				fprintf ( stdout, "%s", tIn.GetString().scstr() );
				fprintf ( stdout, ( j==0 ) ? ": " : " " );
			}
			fprintf ( stdout, "\n" );
		}

		// all done
		sphSockClose ( iSock );
		return;
	}

	sphFatal ( "failed to connect to daemon: please specify listen with sphinx protocol in your config file" );
}
| 10,800
|
C++
|
.cpp
| 355
| 27.388732
| 134
| 0.649219
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,946
|
sphinxmetaphone.cpp
|
manticoresoftware_manticoresearch/src/sphinxmetaphone.cpp
|
//
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "sphinxstd.h"
#include "sphinxint.h"
// the word currently being transcoded into metaphone keys
struct CurrentWord_t
{
	BYTE *	pWord;			// word bytes (not owned); presumably upper-cased by the caller - confirm
	int		iLength;		// word length, bytes
	int		iLengthPadded;	// length including trailing padding, used for bounds checks in StrAt()
};
// uppercase English vowel test ('Y' counts as a vowel here)
static bool IsVowel ( BYTE c )
{
	switch ( c )
	{
		case 'A': case 'E': case 'I': case 'O': case 'U': case 'Y':
			return true;
		default:
			return false;
	}
}
// heuristic Slavo-Germanic origin test: any of a few marker substrings
static bool SlavoGermanic ( BYTE * pString )
{
	// OPTIMIZE!
	const char * szWord = (const char *) pString;
	for ( const char * szMarker : { "W", "K", "CZ", "WITZ" } )
		if ( strstr ( szWord, szMarker ) )
			return true;
	return false;
}
// does the word match szStr1 (first iLength bytes) at offset iStart?
static bool StrAt ( const CurrentWord_t & Word, int iStart, int iLength, const char * szStr1 )
{
	// an out-of-range starting offset never matches
	if ( iStart<0 || iStart>=Word.iLengthPadded )
		return false;

	const char * szAt = (const char *)Word.pWord + iStart;
	return strncmp ( szAt, szStr1, iLength )==0;
}
static bool StrAt ( const CurrentWord_t & Word, int iStart, int iLength, const char * szStr1, const char * szStr2 )
{
if ( iStart<0 || iStart>=Word.iLengthPadded )
return false;
char * szPos = (char *)Word.pWord + iStart;
return !strncmp ( szPos, szStr1, iLength ) || !strncmp ( szPos, szStr2, iLength );
}
static bool StrAt ( const CurrentWord_t & Word, int iStart, int iLength, const char * szStr1, const char * szStr2, const char * szStr3 )
{
if ( iStart<0 || iStart>=Word.iLengthPadded )
return false;
char * szPos = (char *)Word.pWord + iStart;
return !strncmp ( szPos, szStr1, iLength ) || !strncmp ( szPos, szStr2, iLength ) || !strncmp ( szPos, szStr3, iLength );
}
static bool StrAt ( const CurrentWord_t & Word, int iStart, int iLength, const char * szStr1,
const char * szStr2, const char * szStr3, const char * szStr4 )
{
if ( iStart<0 || iStart>=Word.iLengthPadded )
return false;
char * szPos = (char *)Word.pWord + iStart;
return !strncmp ( szPos, szStr1, iLength ) || !strncmp ( szPos, szStr2, iLength ) || !strncmp ( szPos, szStr3, iLength )
|| !strncmp ( szPos, szStr4, iLength );
}
static bool StrAt ( const CurrentWord_t & Word, int iStart, int iLength, const char * szStr1,
const char * szStr2, const char * szStr3, const char * szStr4, const char * szStr5 )
{
if ( iStart<0 || iStart>=Word.iLengthPadded )
return false;
char * szPos = (char *)Word.pWord + iStart;
return !strncmp ( szPos, szStr1, iLength ) || !strncmp ( szPos, szStr2, iLength ) || !strncmp ( szPos, szStr3, iLength )
|| !strncmp ( szPos, szStr4, iLength ) || !strncmp ( szPos, szStr5, iLength );
}
// Append code characters to both the primary and the secondary key.
// Assumes the destination buffers are large enough - the strcat calls are
// unchecked (hence NOLINT); confirm buffer sizing at the call sites.
static void MetaphAdd ( BYTE * pPrimary, BYTE * pSecondary, const char * szAddPrimary, const char * szAddSecondary )
{
	strcat ( (char*)pPrimary, szAddPrimary ); // NOLINT
	strcat ( (char*)pSecondary, szAddSecondary ); // NOLINT
}
// append codes to both keys, then return the advance value from ProcessCode()
// (expects sPrimary/sSecondary to be in scope at the expansion site)
#define ADD_RET(prim,sec,adv)\
{\
	MetaphAdd ( sPrimary, sSecondary, prim, sec );\
	return (adv);\
}

// append codes to both keys without returning
#define ADD(prim,sec)\
	MetaphAdd ( sPrimary, sSecondary, prim, sec )
/// Apply one step of the Double Metaphone encoding: consume the codepoint
/// iCode found at byte offset iCur of Word, append the resulting phonetic
/// code(s) to sPrimary/sSecondary, and return how many codepoints to advance.
/// NOTE(review): the rules appear to follow Lawrence Philips' Double Metaphone
/// reference implementation; branch order is significant, keep it intact.
static int ProcessCode ( int iCode, int iCur, CurrentWord_t & Word, BYTE * sPrimary, BYTE * sSecondary )
{
	BYTE * pWord = Word.pWord;

	// codepoints, not bytes
	int iAdvance = 1;

	// bytes, not codepoints
	int iLast = Word.iLength - 1;

	switch ( iCode )
	{
	case 'A':
	case 'E':
	case 'I':
	case 'O':
	case 'U':
	case 'Y':
		// all init vowels now map to 'A'
		if ( !iCur )
			ADD ( "A", "A" );
		break;

	case 'B':
		// "-mb", e.g", "dumb", already skipped over...
		ADD_RET ( "P", "P", ( pWord[iCur+1]=='B' ) ? 2 : 1 )

	case 0xC7: // 'C' with cedilla (and lowercase variant below)
	case 0xE7:
		ADD_RET ( "S", "S", 1 )

	case 'C':
		// various germanic
		if ( iCur > 1 && !IsVowel ( pWord[iCur-2] ) && StrAt ( Word, iCur-1, 3, "ACH" )
			&& ( pWord[iCur+2]!='I' && ( pWord[iCur+2]!='E' || StrAt ( Word, iCur-2, 6, "BACHER", "MACHER" ) ) ) )
			ADD_RET ( "K", "K", 2 )

		// special case 'caesar'
		if ( iCur==0 && StrAt ( Word, 0, 6, "CAESAR" ) )
			ADD_RET ( "S", "S", 2 )

		// italian 'chianti'
		if ( StrAt ( Word, iCur, 4, "CHIA" ) )
			ADD_RET ( "K", "K", 2 )

		if ( StrAt ( Word, iCur, 2, "CH" ) )
		{
			// find 'michael'
			if ( iCur > 0 && StrAt ( Word, iCur, 4, "CHAE" ) )
				ADD_RET ( "K", "X", 2 )

			// greek roots e.g. 'chemistry', 'chorus'
			if ( iCur==0
				&& ( StrAt ( Word, iCur+1, 5, "HARAC", "HARIS" )
					|| StrAt ( Word, iCur+1, 3, "HOR", "HYM", "HIA", "HEM" ) )
				&& !StrAt ( Word, 0, 5, "CHORE" ) )
			{
				ADD_RET ( "K", "K", 2 )
			}

			// germanic, greek, or otherwise 'ch' for 'kh' sound
			if ( ( StrAt ( Word, 0, 4, "VAN ", "VON " ) || StrAt ( Word, 0, 3, "SCH" ) )
				// 'architect but not 'arch', 'orchestra', 'orchid'
				|| StrAt ( Word, iCur-2, 6, "ORCHES", "ARCHIT", "ORCHID" )
				|| StrAt ( Word, iCur+2, 1, "T", "S" )
				|| ( ( StrAt ( Word, iCur-1, 1, "A", "O", "U", "E" ) || iCur==0 ) // e.g., 'wachtler', 'wechsler', but not 'tichner'
					&& ( StrAt ( Word, iCur+2, 1, "L", "R", "N", "M" ) || StrAt ( Word, iCur+2, 1, "B", "H", "F", "V" )
						|| StrAt ( Word, iCur+2, 1, "W", " " ) ) ) )
			{
				ADD ( "K", "K" );
			} else
			{
				if ( iCur > 0 )
				{
					if ( StrAt ( Word, 0, 2, "MC" ) ) // e.g., "McHugh"
						ADD ( "K", "K" );
					else
						ADD ( "X", "K" );
				} else
					ADD ( "X", "X" );
			}
			return 2;
		}

		// e.g, 'czerny'
		if ( StrAt ( Word, iCur, 2, "CZ" ) && !StrAt ( Word, iCur-2, 4, "WICZ" ) )
			ADD_RET ( "S", "X", 2 )

		// e.g., 'focaccia'
		if ( StrAt ( Word, iCur+1, 3, "CIA" ) )
			ADD_RET ( "X", "X", 3 )

		// double 'C', but not if e.g. 'McClellan'
		if ( StrAt ( Word, iCur, 2, "CC" ) && !( iCur==1 && pWord[0]=='M' ) )
		{
			// 'bellocchio' but not 'bacchus'
			if ( StrAt ( Word, iCur+2, 1, "I", "E", "H" ) && !StrAt ( Word, iCur+2, 2, "HU" ) )
			{
				// 'accident', 'accede' 'succeed'
				if ( ( iCur==1 && pWord[iCur-1]=='A' ) || StrAt ( Word, iCur-1, 5, "UCCEE", "UCCES" ) )
					ADD_RET ( "KS", "KS", 2 )
				else // 'bacci', 'bertucci', other italian
					ADD_RET ( "X", "X", 2 )
			} else // Pierce's rule
				ADD_RET ( "K", "K", 2 )
		}

		if ( StrAt ( Word, iCur, 2, "CK", "CG", "CQ" ) )
			ADD_RET ( "K", "K", 2 )

		if ( StrAt ( Word, iCur, 2, "CI", "CE", "CY" ) )
		{
			// italian vs. english
			if ( StrAt ( Word, iCur, 3, "CIO", "CIE", "CIA" ) )
				ADD_RET ( "S", "X", 2 )
			else
				ADD_RET ( "S", "S", 2 )
		}

		// else
		ADD ( "K", "K" );

		// name sent in 'mac caffrey', 'mac gregor
		if ( StrAt ( Word, iCur+1, 2, " C", " Q", " G" ) )
			return 3;
		else
		{
			if ( StrAt ( Word, iCur+1, 1, "C", "K", "Q" ) && !StrAt ( Word, iCur+1, 2, "CE", "CI" ) )
				return 2;
		}
		break;

	case 'D':
		if ( StrAt ( Word, iCur, 2, "DG" ) )
		{
			if ( StrAt ( Word, iCur+2, 1, "I", "E", "Y" ) ) // e.g. 'edge'
				ADD_RET ( "J", "J", 3 )
			else // e.g. 'edgar'
				ADD_RET ( "TK", "TK", 2 )
		}

		if ( StrAt ( Word, iCur, 2, "DT", "DD" ) )
			ADD_RET ( "T", "T", 2 )

		// else
		ADD_RET ( "T", "T", 1 )

	case 'F':
		ADD_RET ( "F", "F", pWord[iCur+1]=='F' ? 2 : 1 )

	case 'G':
		if ( pWord[iCur+1]=='H' )
		{
			if ( iCur > 0 && !IsVowel ( pWord[iCur-1] ) )
				ADD_RET ( "K", "K", 2 )

			if ( iCur < 3 )
			{
				// 'ghislane', ghiradelli
				if ( iCur==0 )
				{
					if ( pWord[iCur+2]=='I' )
						ADD_RET ( "J", "J", 2 )
					else
						ADD_RET ( "K", "K", 2 )
				}
			}

			// Parker's rule (with some further refinements) - e.g., 'hugh'
			if ( ( iCur > 1 && StrAt ( Word, iCur-2, 1, "B", "H", "D" ) )
				|| ( iCur > 2 && StrAt ( Word, iCur-3, 1, "B", "H", "D" ) ) // e.g., 'bough'
				|| ( iCur > 3 && StrAt ( Word, iCur-4, 1, "B", "H" ) ) ) // e.g., 'broughton'
				return 2;
			else
			{
				// e.g., 'laugh', 'McLaughlin', 'cough', 'gough', 'rough', 'tough'
				if ( iCur > 2 && pWord[iCur-1]=='U' && StrAt ( Word, iCur-3, 1, "C", "G", "L", "R", "T" ) )
					ADD ( "F", "F" );
				else
					if ( iCur > 0 && pWord[iCur-1]!='I' )
						ADD ( "K", "K" );
				return 2;
			}
		}

		if ( pWord[iCur+1]=='N' )
		{
			if ( iCur==1 && IsVowel ( pWord[0] ) && !SlavoGermanic ( pWord ) )
				ADD ( "KN", "N" );
			else // not e.g. 'cagney'
				if ( !StrAt ( Word, iCur+2, 2, "EY" ) && pWord[iCur+1]!='Y' && !SlavoGermanic ( pWord ) )
					ADD ( "N", "KN" );
				else
					ADD ( "KN", "KN" );
			return 2;
		}

		// 'tagliaro'
		if ( StrAt ( Word, iCur+1, 2, "LI" ) && !SlavoGermanic ( pWord ) )
			ADD_RET ( "KL", "L", 2 )

		// -ges-,-gep-,-gel-, -gie- at beginning
		if ( iCur==0 && ( pWord[iCur+1]=='Y' || StrAt ( Word, iCur+1, 2, "ES", "EP", "EB", "EL" )
			|| StrAt ( Word, iCur+1, 2, "EY", "IB", "IL", "IN" ) || StrAt ( Word, iCur+1, 2, "IE", "EI", "ER" ) ) )
			ADD_RET ( "K", "J", 2 )

		// -ger-, -gy-
		if ( ( StrAt ( Word, iCur+1, 2, "ER" ) || pWord[iCur+1]=='Y' ) && !StrAt ( Word, 0, 6, "DANGER", "RANGER", "MANGER" )
			&& !StrAt ( Word, iCur-1, 1, "E", "I" ) && !StrAt ( Word, iCur-1, 3, "RGY", "OGY" ) )
			ADD_RET ( "K", "J", 2 )

		// italian e.g, 'biaggi'
		if ( StrAt ( Word, iCur+1, 1, "E", "I", "Y" ) || StrAt ( Word, iCur-1, 4, "AGGI", "OGGI" ) )
		{
			// obvious germanic
			if ( StrAt ( Word, 0, 4, "VAN ", "VON " ) || StrAt ( Word, 0, 3, "SCH" ) || StrAt ( Word, iCur+1, 2, "ET" ) )
				ADD ( "K", "K" );
			else
			{
				// always soft if french ending
				if ( StrAt ( Word, iCur+1, 4, "IER " ) )
					ADD ( "J", "J" );
				else
					ADD ( "J", "K" );
			}
			return 2;
		}

		ADD_RET ( "K", "K", pWord[iCur+1]=='G' ? 2 : 1 )

	case 'H':
		// only keep if first & before vowel or btw. 2 vowels
		if ( ( iCur==0 || IsVowel ( pWord[iCur-1] ) ) && IsVowel ( pWord[iCur+1] ) )
			ADD_RET ( "H", "H", 2 )
		break; // also takes care of 'HH'

	case 'J':
		// obvious spanish, 'jose', 'san jacinto'
		if ( StrAt ( Word, iCur, 4, "JOSE" ) || StrAt ( Word, 0, 4, "SAN " ) )
		{
			if ( ( iCur==0 && pWord[iCur+4]==' ' ) || StrAt ( Word, 0, 4, "SAN " ) )
				ADD_RET ( "H", "H", 1 )
			else
				ADD_RET ( "J", "H", 1 )
		}

		if ( iCur==0 && !StrAt ( Word, iCur, 4, "JOSE" ) )
			ADD ( "J", "A" ); // Yankelovich/Jankelowicz
		else
		{
			// spanish pron. of e.g. 'bajador'
			if ( ( iCur>0 && IsVowel ( pWord[iCur-1] ) )&& !SlavoGermanic ( pWord ) && ( pWord[iCur+1]=='A' || pWord[iCur+1]=='O' ) )
				ADD ( "J", "H" );
			else
			{
				if ( iCur==iLast )
					ADD ( "J", "" );
				else
					if ( !StrAt ( Word, iCur+1, 1, "L", "T", "K", "S" )
						&& !StrAt ( Word, iCur+1, 1, "N", "M", "B", "Z" )
						&& !StrAt ( Word, iCur-1, 1, "S", "K", "L" ) )
					{
						ADD ( "J", "J" );
					}
			}
		}

		if ( pWord[iCur+1]=='J' ) // it could happen!
			return 2;
		break;

	case 'K':
		ADD_RET ( "K", "K", pWord[iCur+1]=='K' ? 2 : 1 )

	case 'L':
		if ( pWord[iCur+1]=='L' )
		{
			// spanish e.g. 'cabrillo', 'gallegos'
			if ( ( iCur==iLast-2 && StrAt ( Word, iCur-1, 4, "ILLO", "ILLA", "ALLE" ) )
				|| ( ( StrAt ( Word, iLast - 1, 2, "AS", "OS" ) || StrAt ( Word, iLast, 1, "A", "O" ) ) && StrAt ( Word, iCur-1, 4, "ALLE" ) ) )
				ADD_RET ( "L", "", 2 )
			iAdvance = 2;
		}
		ADD ( "L", "L" );
		break;

	case 'M':
		ADD ( "M", "M" );
		// 'dumb','thumb'
		if ( ( StrAt ( Word, iCur-1, 3, "UMB" ) && ( iCur+1==iLast || StrAt ( Word, iCur+2, 2, "ER" ) ) ) || pWord[iCur+1]=='M' )
			return 2;
		break;

	case 'N':
		ADD_RET ( "N", "N", pWord[iCur+1]=='N' ? 2 : 1 )

	case 0xD1: // 'N' with tilde (and lowercase variant below)
	case 0xF1:
		ADD_RET ( "N", "N", 1 )

	case 'P':
		if ( pWord[iCur+1]=='H' )
			ADD_RET ( "F", "F", 2 )

		// also account for "campbell", "raspberry"
		ADD_RET ( "P", "P", StrAt ( Word, iCur+1, 1, "P", "B" ) ? 2 : 1 )

	case 'Q':
		ADD_RET ( "K", "K", pWord[iCur+1]=='Q' ? 2 : 1 )

	case 'R':
		// french e.g. 'rogier', but exclude 'hochmeier'
		if ( iCur==iLast && !SlavoGermanic ( pWord ) && StrAt ( Word, iCur-2, 2, "IE" ) && !StrAt ( Word, iCur-4, 2, "ME", "MA" ) )
			ADD ( "", "R" );
		else
			ADD ( "R", "R" );
		return pWord[iCur+1]=='R' ? 2 : 1;

	case 'S':
		// special cases 'island', 'isle', 'carlisle', 'carlysle'
		if ( StrAt ( Word, iCur-1, 3, "ISL", "YSL" ) )
			return 1;

		// special case 'sugar-'
		if ( iCur==0 && StrAt ( Word, iCur, 5, "SUGAR" ) )
			ADD_RET ( "X", "S", 1 )

		if ( StrAt ( Word, iCur, 2, "SH" ) )
		{
			// germanic
			if ( StrAt ( Word, iCur+1, 4, "HEIM", "HOEK", "HOLM", "HOLZ" ) )
				ADD_RET ( "S", "S", 2 )
			else
				ADD_RET ( "X", "X", 2 )
		}

		// italian & armenian
		if ( StrAt ( Word, iCur, 3, "SIO", "SIA" ) || StrAt ( Word, iCur, 4, "SIAN" ) )
		{
			if ( !SlavoGermanic ( pWord ) )
				ADD_RET ( "S", "X", 3 )
			else
				ADD_RET ( "S", "S", 3 )
		}

		// german & anglicisations, e.g. 'smith' match 'schmidt', 'snider' match 'schneider'
		// also, -sz- in slavic language altho in hungarian it is pronounced 's'
		if ( ( iCur==0 && StrAt ( Word, iCur+1, 1, "M", "N", "L", "W" ) ) || StrAt ( Word, iCur+1, 1, "Z" ) )
			ADD_RET ( "S", "X", StrAt ( Word, iCur+1, 1, "Z" ) ? 2 : 1 )

		if ( StrAt ( Word, iCur, 2, "SC" ) )
		{
			// Schlesinger's rule
			if ( pWord[iCur+2]=='H' )
			{
				if ( StrAt ( Word, iCur+3, 2, "OO", "ER", "EN", "UY" )
					|| StrAt ( Word, iCur+3, 2, "ED", "EM" ) ) // dutch origin, e.g. 'school', 'schooner'
				{
					// 'schermerhorn', 'schenker'
					if ( StrAt ( Word, iCur+3, 2, "ER", "EN" ) )
						ADD_RET ( "X", "SK", 3 )
					else
						ADD_RET ( "SK", "SK", 3 )
				} else
				{
					if ( iCur==0 && !IsVowel ( pWord[3] ) && pWord[3]!='W' )
						ADD_RET ( "X", "S", 3 )
					else
						ADD_RET ( "X", "X", 3 )
				}
			}

			if ( StrAt ( Word, iCur+2, 1, "I", "E", "Y" ) )
				ADD_RET ( "S", "S", 3 )

			// else
			ADD_RET ( "SK", "SK", 3 )
		}

		// french e.g. 'resnais', 'artois'
		if ( iCur==iLast && StrAt ( Word, iCur-2, 2, "AI", "OI" ) )
			ADD ( "", "S" );
		else
			ADD ( "S", "S" );
		return StrAt ( Word, iCur+1, 1, "S", "Z" ) ? 2 : 1;

	case 'T':
		if ( StrAt ( Word, iCur, 4, "TION" ) )
			ADD_RET ( "X", "X", 3 )

		if ( StrAt ( Word, iCur, 3, "TIA", "TCH" ) )
			ADD_RET ( "X", "X", 3 )

		if ( StrAt ( Word, iCur, 2, "TH" ) || StrAt ( Word, iCur, 3, "TTH" ) )
		{
			// special case 'thomas', 'thames' or germanic
			if ( StrAt ( Word, iCur+2, 2, "OM", "AM" ) || StrAt ( Word, 0, 4, "VAN ", "VON " ) || StrAt ( Word, 0, 3, "SCH" ) )
				ADD_RET ( "T", "T", 2 )
			else
				ADD_RET ( "0", "T", 2 ) // yes, zero
		}

		ADD_RET ( "T", "T", StrAt ( Word, iCur+1, 1, "T", "D" ) ? 2 : 1 )

	case 'V':
		ADD_RET ( "F", "F", pWord[iCur+1]=='V' ? 2 : 1 )

	case 'W':
		// can also be in middle of word
		if ( StrAt ( Word, iCur, 2, "WR" ) )
			ADD_RET ( "R", "R", 2 )

		if ( iCur==0 && ( IsVowel ( pWord[iCur+1] ) || StrAt ( Word, iCur, 2, "WH" ) ) )
		{
			// Wasserman should match Vasserman
			if ( IsVowel ( pWord[iCur+1] ) )
				ADD ( "A", "F" );
			else // need Uomo to match Womo
				ADD ( "A", "A" );
		}

		// Arnow should match Arnoff
		if ( ( iCur==iLast && iCur > 0 && IsVowel ( pWord[iCur-1] ) ) || StrAt ( Word, iCur-1, 5, "EWSKI", "EWSKY", "OWSKI", "OWSKY" )
			|| StrAt ( Word, 0, 3, "SCH" ) )
			ADD_RET ( "", "F", 1 )

		// polish e.g. 'filipowicz'
		if ( StrAt ( Word, iCur, 4, "WICZ", "WITZ" ) )
			ADD_RET ( "TS", "FX", 4 )
		break;

	case 'X':
		// french e.g. breaux
		if ( !( iCur==iLast && ( StrAt ( Word, iCur-3, 3, "IAU", "EAU" ) || StrAt ( Word, iCur-2, 2, "AU", "OU" ) ) ) )
			ADD ( "KS", "KS" );
		return ( pWord[iCur+1]=='C' || pWord[iCur+1]=='X' ) ? 2 : 1;

	case 'Z':
		// chinese pinyin e.g. 'zhao'
		if ( pWord[iCur+1]=='H' )
			ADD_RET ( "J", "J", 2 )
		else
			if ( StrAt ( Word, iCur+1, 2, "ZO", "ZI", "ZA" ) || ( SlavoGermanic ( pWord ) && ( iCur > 0 && pWord[iCur-1]!='T' ) ) )
				MetaphAdd ( sPrimary, sSecondary, "S", "TS" );
			else
				MetaphAdd ( sPrimary, sSecondary, "S", "S" );
		return pWord[iCur+1]=='Z' ? 2 : 1;
	}

	return iAdvance;
}
/// Double Metaphone stemmer entry point: replaces pWord in-place with its
/// primary metaphone code. Non-ASCII codepoints (other than C-cedilla and
/// N-tilde) leave the word unchanged. The secondary code is computed but
/// currently discarded (see TODO at the bottom).
void stem_dmetaphone ( BYTE * pWord )
{
	// extra room for the trailing padding space and DWORD-chunk copies
	const int EXTRA_RESERVE = 16;
	BYTE sOriginal [3*SPH_MAX_WORD_LEN+3+EXTRA_RESERVE];
	BYTE sPrimary [3*SPH_MAX_WORD_LEN+3];
	BYTE sSecondary [ 3*SPH_MAX_WORD_LEN+3 ];

	auto iLength = (int) strlen ( (const char *)pWord );
	memcpy ( sOriginal, pWord, iLength + 1 );
	sPrimary[0] = '\0';
	sSecondary[0] = '\0';

	// uppercase ASCII letters in-place; rules below match uppercase only
	BYTE * pStart = sOriginal;
	while ( *pStart )
	{
		if ( *pStart>='a' && *pStart<='z' )
			*pStart = (BYTE) toupper ( *pStart );
		++pStart;
	}

	// pad with a space so rules may safely look one char past the end
	strcat ( (char *) sOriginal, " " ); // NOLINT

	int iAdvance = 0;
	CurrentWord_t Word;
	Word.pWord = sOriginal;
	Word.iLength = iLength;
	Word.iLengthPadded = (int) strlen ( (const char *)sOriginal );

	// skip these when at start of word
	if ( StrAt ( Word, 0, 2, "GN", "KN", "PN", "WR", "PS" ) )
		iAdvance = 1;

	// Initial 'X' is pronounced 'Z' e.g. 'Xavier'
	if ( sOriginal[0]=='X' )
	{
		ADD ( "S", "S" ); // 'Z' maps to 'S'
		iAdvance = 1;
	}

	// walk the word codepoint by codepoint, letting ProcessCode decide how
	// far to advance after each rule application
	const BYTE * pPtr = sOriginal;
	const BYTE * pLastPtr = sOriginal;
	int iCode = -1;
	iCode = sphUTF8Decode ( pPtr );
	while ( iCode!=0 )
	{
		int iCur = int ( pLastPtr-sOriginal );
		if ( iCur>=iLength )
			break;

		for ( int i = 0; i < iAdvance; ++i )
		{
			pLastPtr = pPtr;
			iCode = sphUTF8Decode ( pPtr );
		}

		if ( iCode<=0 )
			break;

		// unknown code: don't copy, just return
		if ( iCode>128 && iCode!=0xC7 && iCode!=0xE7 && iCode!=0xD1 && iCode!=0xF1 )
			return;

		iAdvance = ProcessCode ( iCode, int ( pLastPtr-sOriginal ), Word, sPrimary, sSecondary );
	}

	// keep the original word if no primary code was produced for a non-empty word
	if ( !pWord[0] || sPrimary [0] )
		strcpy ( (char*)pWord, (char*)sPrimary ); // NOLINT

	// TODO: handle secondary too
}
| 17,475
|
C++
|
.cpp
| 530
| 29.420755
| 136
| 0.520576
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| true
| false
|
16,947
|
chunksearchctx.cpp
|
manticoresoftware_manticoresearch/src/chunksearchctx.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "chunksearchctx.h"
#include "sphinxsort.h"
#include "queryprofile.h"
// Per-disk-chunk search context: borrows the sorters and the result meta it
// will merge results into; takes no ownership of either.
DiskChunkSearcherCtx_t::DiskChunkSearcherCtx_t ( Sorters_t & dSorters, CSphQueryResultMeta & tMeta )
	: m_dSorters ( dSorters )
	, m_tMeta ( tMeta )
{}
/// Merge a cloned child context back into this (parent) context:
/// moves sorter matches, merges word stats, propagates errors/warnings,
/// prediction counters, profiling metrics, and iterator stats.
void DiskChunkSearcherCtx_t::MergeChild ( DiskChunkSearcherCtx_t tChild ) const
{
	// sorting results
	ARRAY_CONSTFOREACH ( i, m_dSorters )
		if ( tChild.m_dSorters[i] )
			tChild.m_dSorters[i]->MoveTo ( m_dSorters[i], false );

	auto & tChildRes = tChild.m_tMeta;

	// word statistics
	m_tMeta.MergeWordStats ( tChildRes );

	// other data (warnings, errors, etc.)
	// errors (last child's error wins)
	if ( !tChildRes.m_sError.IsEmpty ())
		m_tMeta.m_sError = tChildRes.m_sError;

	// warnings (last child's warning wins)
	if ( !tChildRes.m_sWarning.IsEmpty ())
		m_tMeta.m_sWarning = tChildRes.m_sWarning;

	// prediction counters
	if ( m_tMeta.m_bHasPrediction )
		m_tMeta.m_tStats.Add ( tChildRes.m_tStats );

	// profiling; guard the parent profile too - a child profile is only
	// created when the parent has one, but that invariant is enforced
	// elsewhere, so avoid a null deref here just in case
	if ( tChildRes.m_pProfile && m_tMeta.m_pProfile )
		m_tMeta.m_pProfile->AddMetric ( *tChildRes.m_pProfile );

	m_tMeta.m_bTotalMatchesApprox |= tChildRes.m_bTotalMatchesApprox;
	m_tMeta.m_tIteratorStats.Merge ( tChildRes.m_tIteratorStats );
}
bool DiskChunkSearcherCtx_t::IsClonable () const
{
return m_dSorters.all_of ( [] ( const ISphMatchSorter * p ) { return p->CanBeCloned (); } );
}
//////////////////////////////////////////////////////////////////////////
// Clone a parent context for a worker thread: owns per-thread sorter clones
// and its own meta (with a private profile when the parent profiles).
DiskChunkSearcherCloneCtx_t::DiskChunkSearcherCloneCtx_t ( const DiskChunkSearcherCtx_t & dParent )
{
	const int iSorters = dParent.m_dSorters.GetLength();
	m_dSorters.Resize ( iSorters );
	for ( int i = 0; i<iSorters; ++i )
		m_dSorters[i] = dParent.m_dSorters[i]->Clone();

	m_tMeta.m_bHasPrediction = dParent.m_tMeta.m_bHasPrediction;
	if ( dParent.m_tMeta.m_pProfile )
		m_tMeta.m_pProfile = new QueryProfile_c;
}
// Release the owned sorter clones and the private profile (if any).
DiskChunkSearcherCloneCtx_t::~DiskChunkSearcherCloneCtx_t()
{
	for ( ISphMatchSorter *& pSorter : m_dSorters )
		SafeDelete ( pSorter );

	SafeDelete ( m_tMeta.m_pProfile );
}
// Implicit conversion: expose the clone's privately-owned sorters and meta
// as a regular (non-owning) search context.
DiskChunkSearcherCloneCtx_t::operator DiskChunkSearcherCtx_t()
{
	return { m_dSorters, m_tMeta };
}
| 2,506
|
C++
|
.cpp
| 66
| 36
| 100
| 0.719305
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,948
|
exprgeodist.cpp
|
manticoresoftware_manticoresearch/src/exprgeodist.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "exprgeodist.h"
#include "sphinx.h"
#include "exprtraits.h"
#include <cmath>
/// Geodistance expression: evaluates the distance from a match's
/// (latitude, longitude) attribute pair to the query's geo-anchor point.
class ExprGeodist_c : public ISphExpr
{
public:
				ExprGeodist_c() = default;
	// bind to the query anchor and schema attributes; false + sError on failure
	bool		Setup ( const CSphQuery & tQuery, const ISphSchema & tSchema, CSphString & sError );
	float		Eval ( const CSphMatch & tMatch ) const final;
	void		FixupLocator ( const ISphSchema * pOldSchema, const ISphSchema * pNewSchema ) final;
	void		Command ( ESphExprCommand eCmd, void * pArg ) final;
	uint64_t	GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final;
	ISphExpr *	Clone() const final;

protected:
	CSphAttrLocator	m_tGeoLatLoc;		// locator of the latitude attribute
	CSphAttrLocator	m_tGeoLongLoc;		// locator of the longitude attribute
	float		m_fGeoAnchorLat;	// anchor latitude from the query
	float		m_fGeoAnchorLong;	// anchor longitude from the query
	CSphString	m_sAttrLat;			// latitude attribute name (for deps/fixup)
	CSphString	m_sAttrLon;			// longitude attribute name (for deps/fixup)
};
/// Bind the expression to the query's geo-anchor and resolve the lat/long
/// attributes in the schema.
/// @return false (with sError set) when the query has no anchor or either
///         attribute is missing from the schema.
bool ExprGeodist_c::Setup ( const CSphQuery & tQuery, const ISphSchema & tSchema, CSphString & sError )
{
	if ( !tQuery.m_bGeoAnchor )
	{
		sError.SetSprintf ( "INTERNAL ERROR: no geoanchor, can not create geodist evaluator" );
		return false;
	}

	int iLat = tSchema.GetAttrIndex ( tQuery.m_sGeoLatAttr.cstr() );
	if ( iLat<0 )
	{
		sError.SetSprintf ( "unknown latitude attribute '%s'", tQuery.m_sGeoLatAttr.cstr() );
		return false;
	}

	int iLong = tSchema.GetAttrIndex ( tQuery.m_sGeoLongAttr.cstr() );
	if ( iLong<0 )
	{
		// fixed: this message used to say "latitude" (copy-paste) for a missing longitude attribute
		sError.SetSprintf ( "unknown longitude attribute '%s'", tQuery.m_sGeoLongAttr.cstr() );
		return false;
	}

	m_tGeoLatLoc = tSchema.GetAttr(iLat).m_tLocator;
	m_tGeoLongLoc = tSchema.GetAttr(iLong).m_tLocator;
	m_fGeoAnchorLat = tQuery.m_fGeoLatitude;
	m_fGeoAnchorLong = tQuery.m_fGeoLongitude;
	m_sAttrLat = tSchema.GetAttr(iLat).m_sName;
	m_sAttrLon = tSchema.GetAttr(iLong).m_sName;
	return true;
}
// square helper for the haversine formula below
static inline double sphSqr ( double fVal )
{
	double fRes = fVal*fVal;
	return fRes;
}
/// Haversine great-circle distance (meters) from the match's coordinates to
/// the anchor point.
/// NOTE(review): assumes both the attribute values and the anchor are in
/// radians - confirm against the code that sets m_fGeoLatitude/m_fGeoLongitude.
float ExprGeodist_c::Eval ( const CSphMatch & tMatch ) const
{
	const double R = 6384000; // approximate Earth radius, meters
	float plat = tMatch.GetAttrFloat ( m_tGeoLatLoc );
	float plon = tMatch.GetAttrFloat ( m_tGeoLongLoc );
	double dlat = plat - m_fGeoAnchorLat;
	double dlon = plon - m_fGeoAnchorLong;
	// haversine formula; Min() clamp guards asin() against rounding above 1.0
	double a = sphSqr ( sin ( dlat/2 ) ) + cos(plat)*cos(m_fGeoAnchorLat)*sphSqr(sin(dlon/2));
	double c = 2*asin ( Min ( 1.0, sqrt(a) ) );
	return (float)(R*c);
}
// Remap both attribute locators when the schema changes (e.g. when the
// expression is moved onto a sorter schema).
void ExprGeodist_c::FixupLocator ( const ISphSchema * pOldSchema, const ISphSchema * pNewSchema )
{
	sphFixupLocator ( m_tGeoLatLoc, pOldSchema, pNewSchema );
	sphFixupLocator ( m_tGeoLongLoc, pOldSchema, pNewSchema );
}
// Expression command dispatch; the only command handled here reports the
// attributes this expression depends on.
void ExprGeodist_c::Command ( ESphExprCommand eCmd, void * pArg )
{
	if ( eCmd!=SPH_EXPR_GET_DEPENDENT_COLS )
		return;

	auto * pCols = static_cast<StrVec_t*>(pArg);
	pCols->Add ( m_sAttrLat );
	pCols->Add ( m_sAttrLon );
}
// Hash the expression state (dependencies, tag, anchor coords) so sorters can
// detect identical expressions; bDisable is set by the dep-hash helper when
// hashing is not possible.
uint64_t ExprGeodist_c::GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable )
{
	static const char * EXPR_TAG = "ExprGeodist_c";

	uint64_t uRes = sphCalcExprDepHash ( this, tSorterSchema, uPrevHash, bDisable );
	uRes = sphFNV64 ( EXPR_TAG, (int) strlen(EXPR_TAG), uRes );
	uRes = sphFNV64 ( &m_fGeoAnchorLat, sizeof(m_fGeoAnchorLat), uRes );
	uRes = sphFNV64 ( &m_fGeoAnchorLong, sizeof(m_fGeoAnchorLong), uRes );
	return uRes;
}
// Deep copy; all members are plain data, so member-wise assignment suffices.
ISphExpr * ExprGeodist_c::Clone() const
{
	auto * pCopy = new ExprGeodist_c;

	// locators
	pCopy->m_tGeoLatLoc = m_tGeoLatLoc;
	pCopy->m_tGeoLongLoc = m_tGeoLongLoc;

	// anchor point
	pCopy->m_fGeoAnchorLat = m_fGeoAnchorLat;
	pCopy->m_fGeoAnchorLong = m_fGeoAnchorLong;

	// attribute names
	pCopy->m_sAttrLat = m_sAttrLat;
	pCopy->m_sAttrLon = m_sAttrLon;

	return pCopy;
}
///////////////////////////////////////////////////////////////////////////////
// Factory: build a geodist evaluator bound to the query's anchor.
// Returns nullptr (with sError set) when setup fails.
ISphExpr * CreateExprGeodist ( const CSphQuery & tQuery, const ISphSchema & tSchema, CSphString & sError )
{
	auto * pGeodist = new ExprGeodist_c;
	if ( pGeodist->Setup ( tQuery, tSchema, sError ) )
		return pGeodist;

	pGeodist->Release();
	return nullptr;
}
| 4,294
|
C++
|
.cpp
| 119
| 34.07563
| 106
| 0.721284
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,949
|
snippetstream.cpp
|
manticoresoftware_manticoresearch/src/snippetstream.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "snippetstream.h"
#include "sphinxint.h"
#include "stripper/html_stripper.h"
#include "tokenizer/tokenizer.h"
#define UINT32_MASK 0xffffffffUL
#define UINT16_MASK 0xffff
// Copy up to 3*SPH_MAX_WORD_LEN bytes of sSrc into sDst and NUL-terminate.
// NOTE(review): copies in whole DWORDs for speed, so it may read up to 3
// bytes past the clamped length in sSrc and write up to 3 bytes past it in
// sDst; both buffers are expected to be padded accordingly - confirm at the
// call sites.
static void CopyString ( BYTE * sDst, const BYTE * sSrc, int iLen )
{
	const int MAX_WORD_BYTES = 3*SPH_MAX_WORD_LEN;
	int iBackup = ( iLen > MAX_WORD_BYTES ) ? MAX_WORD_BYTES : iLen; // clamp to max word length
	int iBackup2 = ( iBackup+3 )>>2; // number of DWORD chunks, rounded up
	DWORD * d = (DWORD*)sDst;
	auto * s = (const DWORD*)sSrc;
	while ( iBackup2-->0 )
		*d++ = *s++;
	sDst[iBackup] = '\0';
}
// make zone name lowercase
// Copy a zone name into dName, lowercasing it and appending a NUL terminator.
static void CopyZoneName ( CSphVector<char> & dName, const char * sZone, int iLen )
{
	dName.Resize ( iLen+1 );
	for ( int i = 0; i<iLen; i++ )
		dName[i] = (char)tolower ( sZone[i] );

	dName[iLen] = '\0';
}
// Pack a zone entry into 64 bits:
// bits 32..63 = token position, bits 16..31 = sibling index, bits 0..15 = zone type.
static uint64_t PackZone ( DWORD uPosition, int iSiblingIndex, int iZoneType )
{
	assert ( iSiblingIndex>=0 && iSiblingIndex<UINT16_MASK );
	assert ( iZoneType>=0 && iZoneType<UINT16_MASK );

	uint64_t uPacked = (uint64_t)uPosition<<32;
	uPacked |= ( iSiblingIndex & UINT16_MASK )<<16;
	uPacked |= iZoneType & UINT16_MASK;
	return uPacked;
}
// Look up a zone type index by name, registering a new one on first sight.
static int FindAddZone ( const char * sZoneName, int iZoneNameLen, SmallStringHash_T<int> & hZones )
{
	CSphString sZone;
	sZone.SetBinary ( sZoneName, iZoneNameLen );

	if ( int * pIndex = hZones ( sZone ) )
		return *pIndex;

	int iNewZone = hZones.GetLength();
	hZones.Add ( iNewZone, sZone );
	return iNewZone;
}
// Register an open or close zone tag (pStart..pEnd, leading '/' marks close)
// at the given token position. Maintains the open-zone stack, cross-links
// open/close entries via their packed sibling indices (see PackZone), and
// records optional extra zone info (character positions and parent zones).
// Returns the zone type index.
static int AddZone ( const char * pStart, const char * pEnd, int uPosition, HitCollector_i & tFunctor, CSphVector<int> & dZoneStack, CSphVector<char> & dZoneName, const char * pBuf )
{
	CSphVector<ZonePacked_t> & dZones = tFunctor.GetZones();
	SmallStringHash_T<int> & hZones = tFunctor.GetZoneInfo().m_hZones;
	CSphVector<int> & dZonePos = tFunctor.GetZoneInfo().m_dZonePos;
	CSphVector<int> & dZoneParent = tFunctor.GetZoneInfo().m_dZoneParent;
	bool bNeedExtraZoneInfo = tFunctor.NeedExtraZoneInfo();

	int iZone;

	// span's management
	if ( *pStart!='/' ) // open zone
	{
		// zone stack management
		int iSelf = dZones.GetLength();
		dZoneStack.Add ( iSelf );

		// add zone itself
		int iZoneNameLen = int ( pEnd-pStart ) - 1;
		CopyZoneName ( dZoneName, pStart, iZoneNameLen );
		iZone = FindAddZone ( dZoneName.Begin(), iZoneNameLen, hZones );
		dZones.Add ( PackZone ( uPosition, iSelf, iZone ) );

		if ( bNeedExtraZoneInfo )
		{
			// the parent for the open zone is the zone itself
			dZoneParent.Add ( iZone );
			// zone position in characters
			dZonePos.Add ( int ( pStart-pBuf ) );
		}
#ifndef NDEBUG
		// debug builds always track positions so the close-tag check below works
		if ( !bNeedExtraZoneInfo )
			dZonePos.Add ( int ( pStart-pBuf ) );
#endif
	} else // close zone
	{
#ifndef NDEBUG
		// lets check open - close tags match
		assert ( dZoneStack.GetLength() && dZoneStack.Last()<dZones.GetLength() );
		int iOpening = dZonePos [ dZoneStack.Last() ];
		assert ( iOpening<pEnd-pBuf && strncmp ( pBuf+iOpening, pStart+1, pEnd-pStart-2 )==0 );
#endif
		int iZoneNameLen = int ( pEnd-pStart ) - 2;
		CopyZoneName ( dZoneName, pStart+1, iZoneNameLen );
		iZone = FindAddZone ( dZoneName.Begin(), iZoneNameLen, hZones );

		// re-pack the matching open entry so it points at this close entry
		int iOpen = dZoneStack.Last();
		int iClose = dZones.GetLength();
		uint64_t uOpenPacked = dZones[ iOpen ];
		DWORD uOpenPos = (DWORD)( ( uOpenPacked>>32 ) & UINT32_MASK );
		assert ( iZone==(int)( uOpenPacked & UINT16_MASK ) ); // check for zone's types match;

		dZones[iOpen] = PackZone ( uOpenPos, iClose, iZone );
		dZones.Add ( PackZone ( uPosition, iOpen, iZone ) );

		if ( bNeedExtraZoneInfo )
		{
			// zone position in characters
			dZonePos.Add ( int ( pStart-pBuf ) );

			// the parent for the closing zone is the previous zone on stack
			int iParentZone = dZoneStack.GetLength()>2 ? dZoneStack[dZoneStack.GetLength()-2] : 0;
			uint64_t uParentPacked = dZones.GetLength() && iParentZone<dZones.GetLength() ? dZones[iParentZone] : 0;
			dZoneParent.Add ( (int)( uParentPacked & UINT16_MASK ) );
		}
#ifndef NDEBUG
		if ( !bNeedExtraZoneInfo )
			dZonePos.Add ( int ( pStart-pBuf ) );
#endif

		// pop up current zone from zone's stack
		dZoneStack.Resize ( dZoneStack.GetLength()-1 );
	}

	return iZone;
}
// Find the offset of the closing '>' for a tag starting at sData ('<'),
// skipping quoted attribute values; returns -1 when the tag is broken
// (nested '<' or unterminated).
static int FindTagEnd ( const char * sData )
{
	assert ( *sData=='<' );

	const char * p = sData+1;
	while ( *p && *p!='>' )
	{
		// a second '<' before the tag closes means broken markup
		if ( *p=='<' )
			return -1;

		if ( *p=='\'' || *p=='"' )
			p = (const char *)SkipQuoted ( (const BYTE *)p );
		else
			++p;
	}

	return *p ? int ( p-sData ) : -1;
}
//////////////////////////////////////////////////////////////////////////
/// functor that maps collected tokens into a stream
/// functor that maps collected tokens into a stream
/// Encodes token/overlap/zone events into a compact byte stream on the Store*
/// calls, then replays them through a TokenFunctor_i via Tokenize().
class CacheStreamer_c : public CacheStreamer_i
{
public:
	explicit		CacheStreamer_c ( int iDocLen );
	void			StoreToken ( const TokenInfo_t & tTok, int iTermIndex ) final;
	void			StoreOverlap ( int iStart, int iLen, int iBoundary ) final;
	void			StoreSkipHtml ( int iStart, int iLen ) final;
	void			StoreSPZ ( BYTE iSPZ, DWORD uPosition, const char *, int iZone ) final;
	void			StoreTail ( int iStart, int iLen, int iBoundary ) final;
	void			SetZoneInfo ( const FunctorZoneInfo_t & tZoneInfo ) final { m_pZoneInfo = &tZoneInfo; }
	void			Tokenize ( TokenFunctor_i & tFunctor ) final;
	bool			IsEmpty() const final;

private:
	// entry type codes; stored in the high 4 bits of each entry's first byte
	enum
	{
		TYPE_TOKEN1 = 0,	///< 1-byte token (4-bit code, and 4-bit len payload)
		TYPE_TOKEN2,		///< 2-byte token (4-bit code, and 8-bit len payload)
		TYPE_OVERLAP1,		///< 1-byte overlap (4-bit code, and 4-bit len payload)
		TYPE_TOKOVER1,		///< 1-byte token/overlap combo (4-bit code, 4-bit token len (overlap len is always 1))
		TYPE_TOKOVER2,		///< 1-byte token/overlap combo (4-bit code, 4-bit token len (overlap len is always 2))
		TYPE_TOKOVER3,		///< 1-byte token/overlap combo (4-bit code, 4-bit token len (overlap len is always 3))
		TYPE_TOKOVER4,		///< 1-byte token/overlap combo (4-bit code, 4-bit token len (overlap len is always 4))
		TYPE_TOKOVER5,		///< 1-byte token/overlap combo (4-bit code, 3-bit token len, 1-bit overlap len)
		TYPE_TOKEN,			///< generic fat token
		TYPE_OVERLAP,		///< generic fat overlap
		TYPE_SKIPHTML,		///< skipped html markup span
		TYPE_SPZ,			///< sentence/paragraph/zone boundary
		TYPE_TAIL,			///< document tail span
		TYPE_MULTIFORM,		///< fat token carrying a multi-position length
		TYPE_TOTAL
	};

	STATIC_ASSERT ( TYPE_TOTAL<=15, OUT_OF_TYPECODES );

	CSphTightVector<BYTE>	m_dTokenStream;		///< the encoded entry stream
	int				m_iReadPtr = 0;				///< current decode offset, in bytes
	int				m_iLastStart = 0;			///< last delta coded token offset, in bytes
	int				m_iLastPos = 0;				///< last delta coded token number, in tokens
	int				m_eLastStored = TYPE_TOTAL;	///< type of the last stored entry (for combo packing)
	const FunctorZoneInfo_t * m_pZoneInfo = nullptr;	///< borrowed zone info, set via SetZoneInfo()

	inline BYTE *	StoreEntry ( int iBytes );
	inline void		ZipInt ( DWORD uValue );
	inline DWORD	UnzipInt();
};
// Reserve roughly 40% of the document length for the token stream; the first
// byte is a dummy so that valid read offsets start at 1.
CacheStreamer_c::CacheStreamer_c ( int iDocLen )
{
	m_dTokenStream.Reserve ( (iDocLen*2)/5 );
	m_dTokenStream.Add ( 0 );
}
// Grow the stream by iBytes and return a pointer to the new (uninitialized) tail.
inline BYTE * CacheStreamer_c::StoreEntry ( int iBytes )
{
	return m_dTokenStream.AddN ( iBytes );
}
// Store an inter-token overlap (separator run) entry. Hot path tries the
// 1-byte encodings first: when the overlap immediately follows the last
// entry, it may be folded into the preceding TOKEN1 as a token+overlap combo.
void CacheStreamer_c::StoreOverlap ( int iStart, int iLen, int iBoundary )
{
	assert ( iLen>0 && iLen<=USHRT_MAX );

	int iDstart = iStart - m_iLastStart;
	m_iLastStart = iStart + iLen;

	if ( iDstart==0 && iLen<16 && iBoundary<0 )
	{
		// try to store a token+overlap combo
		if ( m_eLastStored==TYPE_TOKEN1 )
		{
			int iTokLen = m_dTokenStream.Last() & 15;
			assert ( iTokLen > 0 );

			if ( iLen<=4 && iTokLen<=16 )
			{
				// overlap lengths 1..4 each have a dedicated type code
				BYTE uType = (BYTE)(TYPE_TOKOVER1+iLen-1);
				m_dTokenStream.Last() = BYTE ( ( uType<<4 )+ (BYTE)iTokLen-1 );
				m_eLastStored = uType;
				return;
			} else if ( iLen>=5 && iLen<=6 && iTokLen<=8 )
			{
				// TOKOVER5 packs 3-bit token len and 1-bit overlap len (5 or 6)
				m_dTokenStream.Last() = (BYTE)( ( TYPE_TOKOVER5<<4 ) + ( ( iTokLen-1 ) << 1 ) + iLen-5 );
				m_eLastStored = TYPE_TOKOVER5;
				return;
			}
		}

		// OVERLAP1, most frequent path
		// delta_start is 0, boundary is -1, length fits in 4 bits, so just 1 byte
		m_dTokenStream.Add ( (BYTE)( ( TYPE_OVERLAP1<<4 ) + iLen ) );
		m_eLastStored = TYPE_OVERLAP1;
		return;
	}

	// OVERLAP, stupid generic uncompressed path (can optimize with deltas, if needed)
	BYTE * p = StoreEntry ( 11 );
	p[0] = ( TYPE_OVERLAP<<4 );
	sphUnalignedWrite ( p+1, iStart );
	sphUnalignedWrite ( p+5, WORD(iLen) );
	sphUnalignedWrite ( p+7, iBoundary );
	m_eLastStored = TYPE_OVERLAP;
}
// Store a "skipped html markup" span (start offset and length, zint-encoded).
void CacheStreamer_c::StoreSkipHtml ( int iStart, int iLen )
{
	m_dTokenStream.Add ( TYPE_SKIPHTML<<4 );
	ZipInt ( iStart );
	ZipInt ( iLen );
	m_eLastStored = TYPE_SKIPHTML;
}
// Store a token entry. The common case (adjacent plain word, no term index,
// no multiform) compresses to 1-2 bytes; everything else takes the fat
// TYPE_TOKEN/TYPE_MULTIFORM encoding.
void CacheStreamer_c::StoreToken ( const TokenInfo_t & tTok, int iTermIndex )
{
	assert ( iTermIndex<USHRT_MAX );

	int iDstart = tTok.m_iStart - m_iLastStart;
	int iDpos = tTok.m_uPosition - m_iLastPos;

	m_iLastStart = tTok.m_iStart + tTok.m_iLen;
	m_iLastPos = tTok.m_uPosition;

	if ( iDstart==0 && iDpos==1 && tTok.m_bWord && !tTok.m_bStopWord && iTermIndex==-1 && !tTok.m_iMultiPosLen && tTok.m_iLen<=4095 )
	{
		if ( tTok.m_iLen<16 )
		{
			// TOKEN1, most frequent path
			m_dTokenStream.Add ( (BYTE)( ( TYPE_TOKEN1<<4 ) + tTok.m_iLen ) );
			m_eLastStored = TYPE_TOKEN1;
			return;
		} else
		{
			// TOKEN2, 2nd most frequent path
			m_dTokenStream.Add ( (BYTE)( TYPE_TOKEN2<<4 ) + ( tTok.m_iLen >> 8 ));
			m_dTokenStream.Add ( (BYTE)( 0xFF & tTok.m_iLen ) );
			m_eLastStored = TYPE_TOKEN2;
			return;
		}
	}

	// TOKEN, stupid generic uncompressed path (can optimize with deltas, if needed)
	bool bMultiform = ( tTok.m_iMultiPosLen>0 );
	BYTE* p = StoreEntry ( 5 );

	BYTE eTok = (BYTE)( bMultiform ? TYPE_MULTIFORM : TYPE_TOKEN );
	p[0] = BYTE ( eTok<<4 );
	sphUnalignedWrite ( p+1, tTok.m_iStart );
	ZipInt ( tTok.m_iLen );

	// fixed part: position, word/stopword flags, term index (+1 so 0 means none)
	p = StoreEntry ( bMultiform ? 8 : 7 );
	sphUnalignedWrite ( p, tTok.m_uPosition );
	p[4] = BYTE ( ( tTok.m_bWord<<1 ) + tTok.m_bStopWord );
	sphUnalignedWrite ( p+5, (WORD)(iTermIndex+1) );
	if ( bMultiform )
		p[7] = (BYTE)tTok.m_iMultiPosLen;

	m_eLastStored = eTok;
}
// Store a sentence/paragraph/zone boundary entry; the zone id is optional
// and preceded by a presence flag.
void CacheStreamer_c::StoreSPZ ( BYTE iSPZ, DWORD uPosition, const char *, int iZone )
{
	m_dTokenStream.Add ( TYPE_SPZ<<4 );
	ZipInt ( iSPZ );
	ZipInt ( uPosition );
	ZipInt ( iZone==-1 ? 0 : 1 ); // flag: is a zone id present?
	if ( iZone!=-1 )
		ZipInt ( iZone );

	m_eLastStored = TYPE_SPZ;
}
// Store a document tail span; the boundary offset is optional and preceded
// by a presence flag.
void CacheStreamer_c::StoreTail ( int iStart, int iLen, int iBoundary )
{
	m_dTokenStream.Add ( TYPE_TAIL<<4 );
	ZipInt ( iStart );
	ZipInt ( iLen );
	ZipInt ( iBoundary==-1 ? 0 : 1 ); // flag: is a boundary offset present?
	if ( iBoundary!=-1 )
		ZipInt ( iBoundary );

	m_eLastStored = TYPE_TAIL;
}
// Replay the packed token stream built by the Store* methods through tFunctor's
// callbacks; stops early as soon as any callback returns false.
// Stream format: the high nibble of each entry's first byte is its TYPE_xxx tag,
// the low nibble and any following bytes carry type-specific payload. Compact
// entry types (TOKEN1/TOKEN2/TOKOVERx) derive start offset and position from the
// previous entry via m_iLastStart / m_iLastPos.
void CacheStreamer_c::Tokenize ( TokenFunctor_i & tFunctor )
{
m_iLastStart = 0;
m_iLastPos = 0;
m_iReadPtr = 1;
TokenInfo_t tTok;
bool bStop = false;
CSphVector<SphWordID_t> dTmp;
while ( m_iReadPtr < m_dTokenStream.GetLength() )
{
// entry type is in the high nibble of the first byte
BYTE eTok = m_dTokenStream [ m_iReadPtr ]>>4;
switch ( eTok )
{
// short overlap: 4-bit length, start position implicit (follows previous entry)
case TYPE_OVERLAP1:
{
int iLen = m_dTokenStream [ m_iReadPtr++ ] & 15;
bStop = !tFunctor.OnOverlap ( m_iLastStart, iLen, -1 );
m_iLastStart += iLen;
}
break;
// generic overlap: explicit start, length and boundary offset (11 bytes total)
case TYPE_OVERLAP:
{
BYTE * p = &m_dTokenStream [ m_iReadPtr ];
int iStart = sphUnalignedRead ( *(DWORD*)(p+1) );
int iLen = sphUnalignedRead ( *(WORD*)(p+5) );
int iBoundary = sphUnalignedRead ( *(int*)(p+7) );
m_iReadPtr += 11;
m_iLastStart = iStart + iLen;
bStop = !tFunctor.OnOverlap ( iStart, iLen, iBoundary );
}
break;
// a chunk of HTML markup that was skipped during tokenization
case TYPE_SKIPHTML:
{
m_iReadPtr++;
DWORD uStart = UnzipInt ();
DWORD uLen = UnzipInt ();
tFunctor.OnSkipHtml ( uStart, uLen );
}
break;
// full uncompressed token; MULTIFORM additionally carries a multi-position length byte
case TYPE_TOKEN:
case TYPE_MULTIFORM:
{
BYTE * p = &m_dTokenStream [ m_iReadPtr ];
tTok.m_iStart = sphUnalignedRead ( *(DWORD*)(p+1) );
m_iReadPtr += 5;
tTok.m_iLen = UnzipInt(); // p[5];
p = &m_dTokenStream[m_iReadPtr];
tTok.m_uPosition = sphUnalignedRead ( *(DWORD*)(p) );
tTok.m_bWord = ( p[4] & 2 )!=0;
tTok.m_bStopWord = ( p[4] & 1 )!=0;
// term index was stored biased by +1 so that 0 could mean "none"
tTok.m_iTermIndex = (int)sphUnalignedRead ( *(WORD*)(p+5) ) - 1;
if ( eTok==TYPE_TOKEN )
{
tTok.m_iMultiPosLen = 0;
m_iReadPtr += 7;
} else
{
tTok.m_iMultiPosLen = (int)( p[7] );
m_iReadPtr += 8;
}
m_iLastStart = tTok.m_iStart + tTok.m_iLen;
m_iLastPos = tTok.m_uPosition;
tTok.m_sWord = NULL;
bStop = !tFunctor.OnToken ( tTok, dTmp, NULL );
}
break;
// most frequent path: adjacent plain word, 4-bit length
case TYPE_TOKEN1:
{
tTok.m_iStart = m_iLastStart;
tTok.m_iLen = m_dTokenStream [ m_iReadPtr++ ] & 15;
m_iLastStart += tTok.m_iLen;
tTok.m_uPosition = ++m_iLastPos;
tTok.m_bWord = true;
tTok.m_bStopWord = false;
tTok.m_iTermIndex = -1;
tTok.m_iMultiPosLen = 0;
tTok.m_sWord = NULL;
bStop = !tFunctor.OnToken ( tTok, dTmp, NULL );
}
break;
// adjacent plain word with a 12-bit length (split over two bytes)
case TYPE_TOKEN2:
{
tTok.m_iStart = m_iLastStart;
tTok.m_iLen = ( ( m_dTokenStream[m_iReadPtr] & 15 ) << 8 ) + m_dTokenStream[m_iReadPtr + 1];
m_iReadPtr += 2;
m_iLastStart += tTok.m_iLen;
tTok.m_uPosition = ++m_iLastPos;
tTok.m_bWord = true;
tTok.m_bStopWord = false;
tTok.m_iTermIndex = -1;
tTok.m_iMultiPosLen = 0;
tTok.m_sWord = NULL;
bStop = !tFunctor.OnToken ( tTok, dTmp, NULL );
}
break;
// fused token+overlap entries: overlap length 1..4 encoded in the type tag itself,
// token length (minus one) in the low nibble
case TYPE_TOKOVER1:
case TYPE_TOKOVER2:
case TYPE_TOKOVER3:
case TYPE_TOKOVER4:
{
BYTE iStored = m_dTokenStream [ m_iReadPtr++ ];
int iLen = ( iStored>>4 ) - TYPE_TOKOVER1 + 1;
tTok.m_iStart = m_iLastStart;
tTok.m_iLen = ( iStored & 15 ) + 1;
m_iLastStart += tTok.m_iLen;
tTok.m_uPosition = ++m_iLastPos;
tTok.m_bWord = true;
tTok.m_bStopWord = false;
tTok.m_iTermIndex = -1;
tTok.m_iMultiPosLen = 0;
tTok.m_sWord = NULL;
bStop = !tFunctor.OnToken ( tTok, dTmp, NULL );
if ( bStop )
break;
bStop = !tFunctor.OnOverlap ( m_iLastStart, iLen, -1 );
m_iLastStart += iLen;
}
break;
// fused token+overlap: 3-bit token length, 1-bit overlap length selector (5 or 6)
case TYPE_TOKOVER5:
{
BYTE iStored = m_dTokenStream [ m_iReadPtr++ ];
tTok.m_iStart = m_iLastStart;
tTok.m_iLen = ( ( iStored >> 1 ) & 7 ) + 1;
m_iLastStart += tTok.m_iLen;
tTok.m_uPosition = ++m_iLastPos;
tTok.m_bWord = true;
tTok.m_bStopWord = false;
tTok.m_iTermIndex = -1;
tTok.m_iMultiPosLen = 0;
tTok.m_sWord = NULL;
bStop = !tFunctor.OnToken ( tTok, dTmp, NULL );
if ( bStop )
break;
int iLen = ( iStored & 1 ) + 5;
bStop = !tFunctor.OnOverlap ( m_iLastStart, iLen, -1 );
m_iLastStart += iLen;
}
break;
// sentence/paragraph/zone boundary; zone name is recovered by a reverse
// lookup of the zone id in the zone hash
case TYPE_SPZ:
{
m_iReadPtr++;
assert ( m_pZoneInfo );
BYTE uSPZ = (BYTE)UnzipInt ();
DWORD uPosition = UnzipInt ();
DWORD uFlag = UnzipInt ();
int iZone = -1;
if ( uFlag==1 )
iZone = UnzipInt ();
const char * szZoneName = NULL;
if ( iZone!=-1 )
{
// fixme: it can be a lot faster
for ( const auto& tZone : m_pZoneInfo->m_hZones )
if ( tZone.second==iZone )
{
szZoneName = tZone.first.cstr();
break;
}
}
tFunctor.OnSPZ ( uSPZ, uPosition, szZoneName, iZone );
}
break;
// trailing text after the last token
case TYPE_TAIL:
{
m_iReadPtr++;
DWORD uStart = UnzipInt ();
DWORD uLen = UnzipInt ();
DWORD uFlag = UnzipInt ();
int iBoundary = -1;
if ( uFlag==1 )
iBoundary = UnzipInt ();
tFunctor.OnTail ( uStart, uLen, iBoundary );
}
break;
default:
assert ( 0 && "INTERNAL ERROR: unhandled type in token cache" );
bStop = true;
break;
}
if ( bStop )
break;
}
tFunctor.OnFinish();
}
// True when the stream holds no entries (the first byte is a sentinel slot,
// so a single zero byte also counts as empty).
bool CacheStreamer_c::IsEmpty() const
{
	if ( m_dTokenStream.IsEmpty() )
		return true;
	return m_dTokenStream.GetLength()==1 && m_dTokenStream[0]==0;
}
// Append uValue to the token stream in variable-length big-endian encoding.
inline void CacheStreamer_c::ZipInt ( DWORD uValue )
{
	auto fnPush = [this] ( BYTE uByte ) { m_dTokenStream.Add ( uByte ); };
	ZipValueBE ( fnPush, uValue );
}
// Decode the next variable-length big-endian integer at the read cursor,
// advancing m_iReadPtr past it. (Mutation happens through the captured `this`,
// so the lambda needs no `mutable`.)
inline DWORD CacheStreamer_c::UnzipInt()
{
	auto fnNextByte = [this]() { return m_dTokenStream[m_iReadPtr++]; };
	return UnzipValueBE<DWORD> ( fnNextByte );
}
//////////////////////////////////////////////////////////////////////////
// Factory: build a token-stream cache sized for a document of iDocLen bytes.
CacheStreamer_i * CreateCacheStreamer ( int iDocLen )
{
	auto * pStreamer = new CacheStreamer_c ( iDocLen );
	return pStreamer;
}
// Tokenize the document currently loaded into the functor's tokenizer and feed
// every token / overlap (inter-token text) / skipped-HTML chunk / SPZ boundary
// to tFunctor, stopping early when a callback returns false. Handles blended
// characters, multi-destination wordforms, exact-word (non-stemmed) forms,
// HTML "retain" strip mode and zone tracking.
void TokenizeDocument ( HitCollector_i & tFunctor, const CSphHTMLStripper * pStripper, DWORD iSPZ )
{
TokenizerRefPtr_c pTokenizer = tFunctor.GetTokenizer();
DictRefPtr_c & pDict = tFunctor.GetDict();
const char * pStartPtr = pTokenizer->GetBufferPtr ();
const char * pLastTokenEnd = pStartPtr;
const char * pBufferEnd = pTokenizer->GetBufferEnd();
BYTE sNonStemmed [ 3*SPH_MAX_WORD_LEN+4];
TokenInfo_t tTok;
tTok.m_iStart = 0;
tTok.m_uPosition = 0;
tTok.m_sWord = sNonStemmed;
tTok.m_bStopWord = false;
tTok.m_iTermIndex = -1;
tTok.m_iMultiPosLen = 0;
const CSphIndexSettings & tIndexSettings = tFunctor.GetIndexSettings();
const SnippetQuerySettings_t & tSnippetQuery = tFunctor.GetSnippetQuery();
bool bRetainHtml = tSnippetQuery.m_sStripMode=="retain";
int iBoundaryStep = tIndexSettings.m_iBoundaryStep;
BYTE * sWord = NULL;
DWORD uPosition = 0;
DWORD uStep = 1;
// [pBlendedStart..pBlendedEnd) tracks the span of the current blended token, if any
const char * pBlendedStart = NULL;
const char * pBlendedEnd = NULL;
bool bBlendedHead = false;
bool bBlendedPart = false;
CSphVector<SphWordID_t> dMultiToken;
CSphVector<int> dMultiPosDelta;
CSphVector<int> dZoneStack;
CSphVector<char> dZoneName ( 16+3*SPH_MAX_WORD_LEN );
// FIXME!!! replace by query SPZ extraction pass
if ( !iSPZ && bRetainHtml )
iSPZ = MAGIC_CODE_ZONE;
// main tokenization loop
while ( ( sWord = pTokenizer->GetToken() )!=NULL )
{
const char * pTokenStart = pTokenizer->GetTokenStart ();
tTok.m_iMultiPosLen = 0;
dMultiPosDelta.Resize ( 0 );
// the blended span has been passed; flush its trailing part (if any)
// and drop the accumulated blended word ids
if ( pBlendedEnd<pTokenStart )
{
// FIXME!!! implement proper handling of blend-chars
if ( pLastTokenEnd<pBlendedEnd && bBlendedPart )
{
tTok.m_uWordId = 0;
tTok.m_bStopWord = false;
tTok.m_uPosition = uPosition; // let's stick to last blended part
tTok.m_iStart = int ( pLastTokenEnd - pStartPtr );
tTok.m_iLen = int ( pBlendedEnd - pLastTokenEnd );
tTok.m_bWord = false;
if ( !tFunctor.OnToken ( tTok, dMultiToken, NULL ) )
{
tFunctor.OnFinish();
return;
}
pLastTokenEnd = pBlendedEnd;
}
dMultiToken.Resize ( 0 );
}
uPosition += uStep + pTokenizer->GetOvershortCount();
if ( pTokenizer->GetBoundary() )
uPosition += iBoundaryStep;
if ( pTokenizer->TokenIsBlended() )
uStep = 0;
// collect all tokens from multi destination of multi word-form
bool bMultiDestHead = false;
int iDestCount = 0;
pTokenizer->WasTokenMultiformDestination ( bMultiDestHead, iDestCount );
if ( bMultiDestHead )
{
assert ( iDestCount>1 );
tTok.m_iMultiPosLen = iDestCount;
int iLastToken = iDestCount-1;
// blended from destination wordform means multiple lemma from appropriate destination token
bool bWasBlended = pTokenizer->TokenIsBlended ();
dMultiToken.Add ( pDict->GetWordID ( sWord ) );
dMultiPosDelta.Add ( 0 );
int iToken = ( bWasBlended ? 0 : 1 );
while (true)
{
sWord = pTokenizer->GetToken ();
assert ( sWord );
bool bBlended = pTokenizer->TokenIsBlended();
if ( iToken==iLastToken && !bBlended )
break;
dMultiToken.Add ( pDict->GetWordID ( sWord ) );
dMultiPosDelta.Add ( bWasBlended ? 0 : 1 );
bWasBlended = bBlended;
if ( !bBlended )
iToken++;
}
dMultiPosDelta.Add ( bWasBlended ? 0 : 1 );
#ifndef NDEBUG
// position deltas across the destination tokens must add up to iDestCount-1
int iDeltaPos = 0;
ARRAY_FOREACH ( i, dMultiPosDelta )
iDeltaPos += dMultiPosDelta[i];
assert ( iDeltaPos==iDestCount-1 );
#endif
uStep = iDestCount;
}
// handle only blended parts
if ( pTokenizer->TokenIsBlended() && !bMultiDestHead )
{
if ( tIndexSettings.m_bIndexExactWords && pTokenizer->GetTokenMorph()!=SPH_TOKEN_MORPH_GUESS )
{
BYTE sTmpBuf [ 3*SPH_MAX_WORD_LEN+4];
sTmpBuf[0] = MAGIC_WORD_HEAD_NONSTEMMED;
CopyString ( sTmpBuf+1, sWord, int ( pTokenizer->GetTokenEnd() - pTokenStart ) );
dMultiToken.Add ( pDict->GetWordIDNonStemmed ( sTmpBuf ) );
}
// must be last because it can change (stem) sWord
dMultiToken.Add ( pDict->GetWordID ( sWord ) );
pBlendedStart = pTokenizer->GetTokenStart();
pBlendedEnd = Max ( pBlendedEnd, pTokenizer->GetTokenEnd() );
bBlendedHead = true;
continue;
}
// emit the gap between the previous token and this one, either as the head
// part of a blended token or as a plain overlap
if ( pTokenStart>pLastTokenEnd )
{
bool bDone = false;
if ( pBlendedStart<pTokenStart && bBlendedHead )
{
// FIXME!!! implement proper handling of blend-chars
if ( ( pBlendedStart - pLastTokenEnd )>0 )
bDone = !tFunctor.OnOverlap ( int ( pLastTokenEnd-pStartPtr ), int ( pBlendedStart-pLastTokenEnd ), pTokenizer->GetBoundary() ? pTokenizer->GetBoundaryOffset() : -1 );
tTok.m_uWordId = 0;
tTok.m_bStopWord = false;
tTok.m_uPosition = uPosition; // let's stick to 1st blended part
tTok.m_iStart = int ( pBlendedStart - pStartPtr );
tTok.m_iLen = int ( pTokenStart - pBlendedStart );
tTok.m_bWord = false;
if ( !bDone )
bDone = !tFunctor.OnToken ( tTok, dMultiToken, &dMultiPosDelta );
} else
bDone = !tFunctor.OnOverlap ( int ( pLastTokenEnd-pStartPtr ), int ( pTokenStart - pLastTokenEnd ), pTokenizer->GetBoundary() ? pTokenizer->GetBoundaryOffset() : -1 );
if ( bDone )
{
tFunctor.OnFinish();
return;
}
pLastTokenEnd = pTokenStart;
}
bBlendedHead = false;
bBlendedPart = pTokenizer->TokenIsBlendedPart();
// "retain" strip mode: markup stays in the text, so tags are reported as
// skipped-HTML chunks and zone/paragraph tags still produce SPZ events
if ( bRetainHtml && *pTokenStart=='<' )
{
const html_stripper::StripperTag_t * pTag = NULL;
const BYTE * sZoneName = NULL;
const char * pEndSPZ = NULL;
int iZoneNameLen = 0;
if ( iSPZ && pStripper && pTokenStart+2<pBufferEnd && ( pStripper->IsValidTagStart ( *(pTokenStart+1) ) || pTokenStart[1]=='/') )
{
pEndSPZ = (const char *)pStripper->FindTag ( (const BYTE *)pTokenStart+1, &pTag, &sZoneName, &iZoneNameLen );
}
// regular HTML markup - keep it
int iTagEnd = FindTagEnd ( pTokenStart );
if ( iTagEnd!=-1 )
{
assert ( pTokenStart+iTagEnd<pTokenizer->GetBufferEnd() );
tFunctor.OnSkipHtml ( int ( pTokenStart-pStartPtr ), iTagEnd+1 );
pTokenizer->SetBufferPtr ( pTokenStart+iTagEnd+1 );
pLastTokenEnd = pTokenStart+iTagEnd+1; // fix it up to prevent adding last chunk on exit
}
if ( pTag ) // (!S)PZ fix-up
{
pEndSPZ += ( pEndSPZ+1<=pBufferEnd && ( *pEndSPZ )!='\0' ); // skip closing angle bracket, if any
assert ( pTag->m_bPara || pTag->m_bZone );
assert ( pTag->m_bPara || ( pEndSPZ && ( pEndSPZ[0]=='\0' || pEndSPZ[-1]=='>' ) ) ); // should be at tag's end
assert ( pEndSPZ && pEndSPZ<=pBufferEnd );
// handle paragraph boundaries
if ( pTag->m_bPara )
{
tFunctor.OnSPZ ( MAGIC_CODE_PARAGRAPH, uPosition, NULL, -1 );
} else if ( pTag->m_bZone ) // handle zones
{
int iZone = AddZone ( pTokenStart+1, pTokenStart+2+iZoneNameLen, uPosition, tFunctor, dZoneStack, dZoneName, pStartPtr );
tFunctor.OnSPZ ( MAGIC_CODE_ZONE, uPosition, dZoneName.Begin(), iZone );
}
}
if ( iTagEnd )
continue;
}
// handle SPZ tokens GE then needed
// add SENTENCE, PARAGRAPH, ZONE token, do junks and tokenizer and pLastTokenEnd fix up
// FIXME!!! it heavily depends on such attitude MAGIC_CODE_SENTENCE < MAGIC_CODE_PARAGRAPH < MAGIC_CODE_ZONE
if ( *sWord==MAGIC_CODE_SENTENCE || *sWord==MAGIC_CODE_PARAGRAPH || *sWord==MAGIC_CODE_ZONE )
{
int iZone = -1;
if ( *sWord==MAGIC_CODE_ZONE )
{
// zone name follows the magic code in the buffer, terminated by another magic code
const char * pZoneEnd = pTokenizer->GetBufferPtr();
const char * pZoneStart = pZoneEnd;
while ( *pZoneEnd && *pZoneEnd!=MAGIC_CODE_ZONE )
pZoneEnd++;
pZoneEnd++; // skip zone token too
pTokenizer->SetBufferPtr ( pZoneEnd );
pLastTokenEnd = pZoneEnd; // fix it up to prevent adding last chunk on exit
iZone = AddZone ( pZoneStart, pZoneEnd, uPosition, tFunctor, dZoneStack, dZoneName, pStartPtr );
}
// SPZ token has position and could be last token too
if ( iSPZ && *sWord>=iSPZ )
{
tFunctor.OnSPZ ( *sWord, uPosition, dZoneName.Begin(), iZone );
} else
uStep = 0;
if ( *sWord==MAGIC_CODE_PARAGRAPH )
pLastTokenEnd = pTokenStart+1;
continue;
}
pLastTokenEnd = pTokenizer->GetTokenEnd ();
// might differ when sbsc got replaced by utf codepoint
int iTokenLen = int ( pLastTokenEnd - pTokenStart );
auto iWordLen = (int) strlen ( ( const char *)sWord );
bool bPopExactMulti = false;
// exact-word form (=word) gets its own non-stemmed word id, pushed temporarily
if ( tIndexSettings.m_bIndexExactWords )
{
BYTE sTmpBuf [ 3*SPH_MAX_WORD_LEN+4];
sTmpBuf[0] = MAGIC_WORD_HEAD_NONSTEMMED;
CopyString ( sTmpBuf+1, sWord, iWordLen );
dMultiToken.Add ( pDict->GetWordIDNonStemmed ( sTmpBuf ) );
bPopExactMulti = true;
}
// must be last because it can change (stem) sWord
CopyString ( sNonStemmed, sWord, iWordLen );
SphWordID_t iWord = pDict->GetWordID ( sWord );
tTok.m_uWordId = iWord;
tTok.m_bStopWord = false;
if ( !iWord )
tTok.m_bStopWord = pDict->IsStopWord ( sWord );
// compute position
if ( !iWord || tTok.m_bStopWord )
uStep = tIndexSettings.m_iStopwordStep;
else if ( !tTok.m_iMultiPosLen ) // keep position step from multi word-forms
uStep = 1;
tTok.m_uPosition = ( iWord || tTok.m_bStopWord ) ? uPosition : 0;
tTok.m_iStart = int ( pTokenStart - pStartPtr );
tTok.m_iLen = iTokenLen;
tTok.m_bWord = !!iWord;
// match & emit
// star match needs non-stemmed word
if ( !tFunctor.OnToken ( tTok, dMultiToken, &dMultiPosDelta ) )
{
tFunctor.OnFinish();
return;
}
if ( bPopExactMulti )
dMultiToken.Pop();
}
// last space if any
if ( pLastTokenEnd<pBlendedEnd && bBlendedPart )
{
// FIXME!!! implement proper handling of blend-chars
tTok.m_uWordId = 0;
tTok.m_bStopWord = false;
tTok.m_uPosition = uPosition; // let's stick to last blended part, uPosition and not uPosition-1 as no iteration happened at exit
tTok.m_iStart = int ( pLastTokenEnd - pStartPtr );
tTok.m_iLen = int ( pBlendedEnd - pLastTokenEnd );
tTok.m_bWord = false;
tTok.m_iMultiPosLen = 0;
tFunctor.OnToken ( tTok, dMultiToken, &dMultiPosDelta );
pLastTokenEnd = pBlendedEnd;
}
// trailing text after the last token
if ( pLastTokenEnd!=pTokenizer->GetBufferEnd() )
tFunctor.OnTail ( int ( pLastTokenEnd-pStartPtr ), int ( pTokenizer->GetBufferEnd() - pLastTokenEnd ), pTokenizer->GetBoundary() ? pTokenizer->GetBoundaryOffset() : -1 );
tFunctor.OnFinish();
}
| 26,335
|
C++
|
.cpp
| 766
| 31.075718
| 182
| 0.670079
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,950
|
taskflushmutable.cpp
|
manticoresoftware_manticoresearch/src/taskflushmutable.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "taskflushmutable.h"
#include "searchdtask.h"
#include "searchdaemon.h"
using namespace Threads;
/// Accessor for the RT flush period in microseconds (searchd.rt_flush_period).
/// Returns a mutable reference to a function-local static, so callers can both
/// read and assign the value.
int64_t& FlushPeriodUs()
{
/// config searchd.rt_flush_period
static int64_t iRtFlushPeriodUs = DEFAULT_FLUSH_PERIOD; // default period is 10 hours
return iRtFlushPeriodUs;
}
// Set the RT flush period (microseconds); stored in the FlushPeriodUs() static.
void SetRtFlushPeriod ( int64_t iPeriod )
{
	auto & iStoredPeriod = FlushPeriodUs();
	iStoredPeriod = iPeriod;
}
// Thread-safe set of table names, internally guarded by a coroutine-aware rwlock.
// Fix: the cross-thread "disabled" flag was a `volatile bool`; volatile provides
// no inter-thread synchronization guarantees in C++, so the flag is now a
// std::atomic<bool> (relaxed ordering suffices — it is a one-way shutdown latch,
// and the guarded set itself is protected by the rwlock).
class StringSetMT_c
{
	Threads::Coro::RWLock_c m_dGuard;
	sph::StringSet m_dSet GUARDED_BY ( m_dGuard );
	std::atomic<bool> m_bDisabled { false }; // set once at shutdown, read by flush tasks

public:
	// Add sName if absent; returns true when it was actually inserted.
	bool AddUniq ( const CSphString& sName ) EXCLUDES ( m_dGuard )
	{
		SccWL_t wLock ( m_dGuard );
		return m_dSet.Add ( sName );
	}

	void Delete ( const CSphString& sName ) EXCLUDES ( m_dGuard )
	{
		SccWL_t wLock ( m_dGuard );
		m_dSet.Delete ( sName );
	}

	// Membership test; always false once the set is disabled (shutdown path).
	bool Contains ( const CSphString& sName ) EXCLUDES ( m_dGuard )
	{
		if ( m_bDisabled.load ( std::memory_order_relaxed ) )
			return false;
		SccRL_t rLock ( m_dGuard );
		return m_dSet[sName];
	}

	// One-way latch: after Disable() the set reports empty and rejects lookups.
	void Disable()
	{
		m_bDisabled.store ( true, std::memory_order_relaxed );
	}

	bool IsDisabled() const
	{
		return m_bDisabled.load ( std::memory_order_relaxed );
	}
};
namespace
{

// Singleton set of table names currently subscribed to periodic flushing.
StringSetMT_c& FlushSet()
{
static StringSetMT_c hFlushSet;
return hFlushSet;
}

// Schedule a one-shot flush job for table sName, FlushPeriodUs() after the last
// finished flush. The job re-schedules itself after a successful flush, forming
// a per-table periodic loop that stops once the table is unsubscribed, becomes
// non-mutable, or global flushing is disabled.
// NOTE(review): iLastFlushFinishedTime is a plain static mutated inside the job
// lambda — presumably safe because TaskManager serializes jobs of one task id;
// confirm against TaskManager semantics.
void ScheduleFlushTask ( CSphString sName )
{
static int iRtFlushTask = TaskManager::RegisterGlobal ( "Flush mutable table" );
static auto iLastFlushFinishedTime = sphMicroTimer();
TaskManager::ScheduleJob ( iRtFlushTask, iLastFlushFinishedTime + FlushPeriodUs(), [sName = std::move ( sName )]() mutable
{
if ( FlushSet().IsDisabled() || !FlushSet().Contains ( sName ) )
return;
auto pServed = GetServed ( sName );
if ( !pServed || !ServedDesc_t::IsMutable ( pServed ) ) // index went out or not suitable
{
FlushSet().Delete ( sName );
return;
}
RIdx_T<RtIndex_i*> pRT { pServed };
assert ( pRT );
// do the flush
pRT->ForceRamFlush ( "periodic" );
// once more check for disabled - since ForceRamFlush may be long
if ( FlushSet().IsDisabled() )
return;
iLastFlushFinishedTime = sphMicroTimer();
ScheduleFlushTask ( std::move (sName) );
} );
}

// Subscribe a table to periodic flushing; a no-op when flushing is globally
// disabled or the table is already subscribed.
void SubscribeFlushIndex ( CSphString sName )
{
if ( FlushSet().IsDisabled ())
return;
if ( FlushSet().AddUniq ( sName ))
ScheduleFlushTask ( std::move ( sName ) );
};
} // namespace
// Public entry point: subscribe table sName to periodic RAM flushing.
void HookSubscribeMutableFlush ( const CSphString& sName )
{
SubscribeFlushIndex ( sName );
}
// Globally disable periodic flushing (daemon shutdown); pending jobs become no-ops.
void ShutdownFlushingMutable ()
{
FlushSet().Disable();
}
| 2,904
|
C++
|
.cpp
| 106
| 25.283019
| 123
| 0.724063
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,951
|
docidlookup.cpp
|
manticoresoftware_manticoresearch/src/docidlookup.cpp
|
//
//
// Copyright (c) 2018-2024, Manticore Software LTD (https://manticoresearch.com)
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "docidlookup.h"
#include "secondaryindex.h"
#include "sphinxfilter.h"
#include "killlist.h"
#include "attribute.h"
#include "fileio.h"
#include "columnarfilter.h"
#include <boost/preprocessor/repetition/repeat.hpp>
// Rowid iterator base that materializes the whole result set up front and then
// serves it in chunks of up to MAX_COLLECTED rowids. Storage flavor is chosen
// by the BITMAP parameter: a sorted rowid vector (false) or one bit per row of
// the whole index (true).
template <bool BITMAP>
class CachedIterator_T : public RowidIterator_i
{
public:
CachedIterator_T ( int64_t iRsetEstimate, DWORD uTotalDocs );
bool HintRowID ( RowID_t tRowID ) override;
void SetCutoff ( int iCutoff ) override {} // fixme! add real cutoff
bool WasCutoffHit() const override { return false; }
protected:
static const int MAX_COLLECTED = 512;
// scratch chunk buffer handed out to callers by ReturnRowIdChunk()
CSphFixedVector<RowID_t> m_dCollected {MAX_COLLECTED};
// BITMAP=false storage: collected rowids, sorted by Finalize()
CSphTightVector<RowID_t> m_dRowIDs;
int m_iId = 0; // read cursor into m_dRowIDs
// BITMAP=true storage: bit-per-row plus the min/max bounds of set bits
BitVec_T<uint64_t> m_tBitmap;
RowID_t m_tRowID = 0;
RowID_t m_tMinRowID = INVALID_ROWID;
RowID_t m_tMaxRowID = INVALID_ROWID;
bool m_bFirstTime = true;
FORCE_INLINE void Add ( RowID_t tRowID );
bool Finalize();
bool ReturnRowIdChunk ( RowIdBlock_t & dRowIdBlock );
};
// Bitmap flavor: allocate one bit per row of the index; the result-set size
// estimate is not needed here.
template <>
CachedIterator_T<true>::CachedIterator_T ( int64_t iRsetEstimate, DWORD uTotalDocs )
{
m_tBitmap.Init(uTotalDocs);
}
// Vector flavor: pre-reserve storage for the estimated result-set size.
template <>
CachedIterator_T<false>::CachedIterator_T ( int64_t iRsetEstimate, DWORD uTotalDocs )
{
m_dRowIDs.Reserve(iRsetEstimate);
}
// Bitmap flavor: advance the cursor to the first set bit at or after tRowID.
// Returns false when no set bit remains; never rewinds backwards.
template <>
bool CachedIterator_T<true>::HintRowID ( RowID_t tRowID )
{
	if ( tRowID<m_tRowID )
		return true;

	for ( m_tRowID = tRowID; m_tRowID<=m_tMaxRowID; m_tRowID++ )
		if ( m_tBitmap.BitGet ( m_tRowID ) )
			return true;

	return false;
}
// Vector flavor: advance the read cursor to the first stored rowid >= tRowID.
// Uses a linear scan for nearby targets and a binary search for distant ones.
// Returns false once the cursor passes the end of the list.
template <>
bool CachedIterator_T<false>::HintRowID ( RowID_t tRowID )
{
RowID_t * pRowID = m_dRowIDs.Begin() + m_iId;
RowID_t * pRowIdMax = m_dRowIDs.End();
if ( m_dRowIDs.IsEmpty() || pRowID>=pRowIdMax )
return false;
const int64_t LINEAR_THRESH = 256;
// NOTE: unsigned subtraction — a backwards hint (tRowID < *pRowID) wraps to a
// huge value and falls through to the binary search, which then simply keeps
// the cursor in place; forward-only use is assumed (see comment below)
if ( tRowID - *pRowID < LINEAR_THRESH )
{
const RowID_t * pRowIdStart = m_dRowIDs.Begin();
while ( pRowID<pRowIdMax && *pRowID<tRowID )
pRowID++;
m_iId = pRowID-pRowIdStart;
return pRowID<pRowIdMax;
}
else
{
// we assume we are never rewinding backwards
const RowID_t * pFound = sphBinarySearchFirst ( pRowID, pRowIdMax-1, SphIdentityFunctor_T<RowID_t>(), tRowID );
assert(pFound);
if ( *pFound < tRowID )
return false;
m_iId = pFound-m_dRowIDs.Begin();
return true;
}
}
// Bitmap flavor: record a matching rowid and keep the min/max bounds of set bits.
template <>
void CachedIterator_T<true>::Add ( RowID_t tRowID )
{
	if ( m_tMinRowID==INVALID_ROWID )
		m_tMinRowID = m_tMaxRowID = tRowID;	// first rowid seen
	else
	{
		if ( tRowID<m_tMinRowID )
			m_tMinRowID = tRowID;
		if ( tRowID>m_tMaxRowID )
			m_tMaxRowID = tRowID;
	}

	m_tBitmap.BitSet(tRowID);
}
// Vector flavor: append the matching rowid (sorted later by Finalize()).
template<>
void CachedIterator_T<false>::Add ( RowID_t tRowID )
{
m_dRowIDs.Add(tRowID);
}
// Bitmap flavor: position the scan cursor at the smallest collected rowid;
// returns false when nothing was added.
template<>
bool CachedIterator_T<true>::Finalize()
{
m_tRowID = m_tMinRowID;
return m_tMinRowID!=INVALID_ROWID;
}
// Vector flavor: sort the collected rowids so chunks come out in ascending order;
// returns false when nothing was added.
template<>
bool CachedIterator_T<false>::Finalize()
{
m_dRowIDs.Sort();
return !m_dRowIDs.IsEmpty();
}
// Bitmap flavor: walk set bits from the cursor, copying up to MAX_COLLECTED
// rowids into the scratch buffer, and hand that chunk to the caller.
template <>
bool CachedIterator_T<true>::ReturnRowIdChunk ( RowIdBlock_t & dRowIdBlock )
{
	RowID_t * pOut = m_dCollected.Begin();
	const RowID_t * pOutMax = m_dCollected.Begin() + m_dCollected.GetLength();

	for ( ; m_tRowID<=m_tMaxRowID && pOut<pOutMax; m_tRowID++ )
		if ( m_tBitmap.BitGet ( m_tRowID ) )
			*pOut++ = m_tRowID;

	return ReturnIteratorResult ( pOut, m_dCollected.Begin(), dRowIdBlock );
}
// Vector flavor: hand out the next chunk (up to MAX_COLLECTED rowids) directly
// from the sorted rowid list and advance the read cursor.
template <>
bool CachedIterator_T<false>::ReturnRowIdChunk ( RowIdBlock_t & dRowIdBlock )
{
RowID_t * pRowIdStart = m_dRowIDs.Begin() + m_iId;
RowID_t * pRowID = Min ( pRowIdStart+m_dCollected.GetLength(), m_dRowIDs.End() );
m_iId += pRowID-pRowIdStart;
return ReturnIteratorResult ( pRowID, pRowIdStart, dRowIdBlock );
}
// Rowid iterator over an explicit docid value list: merge-joins the (sorted)
// filter values against the docid->rowid lookup. ROWID_LIMITS additionally
// restricts results to a rowid window (used for pseudo-sharding).
template <bool ROWID_LIMITS, bool BITMAP>
class RowidIterator_LookupValues_T : public CachedIterator_T<BITMAP>
{
using BASE = CachedIterator_T<BITMAP>;
public:
RowidIterator_LookupValues_T ( const VecTraits_T<DocID_t>& tValues, int64_t iRsetEstimate, DWORD uTotalDocs, const BYTE * pDocidLookup, const RowIdBoundaries_t * pBoundaries = nullptr );
bool GetNextRowIdBlock ( RowIdBlock_t & dRowIdBlock ) override;
bool HintRowID ( RowID_t tRowID ) override;
int64_t GetNumProcessed() const override { return m_iProcessed; }
void AddDesc ( CSphVector<IteratorDesc_t> & dDesc ) const override { dDesc.Add ( { "id", "DocidIndex" } ); }
private:
RowIdBoundaries_t m_tBoundaries;
int64_t m_iProcessed {0};
LookupReaderIterator_c m_tLookupReader;
DocidListReader_c m_tFilterReader;
bool Fill();
FORCE_INLINE bool FillIfFirstTime();
};
// Wire up the two readers (docid lookup table and filter value list); the rowid
// window is copied only when supplied.
template <bool ROWID_LIMITS, bool BITMAP>
RowidIterator_LookupValues_T<ROWID_LIMITS, BITMAP>::RowidIterator_LookupValues_T ( const VecTraits_T<DocID_t>& tValues, int64_t iRsetEstimate, DWORD uTotalDocs, const BYTE * pDocidLookup, const RowIdBoundaries_t * pBoundaries )
: BASE ( iRsetEstimate, uTotalDocs )
, m_tLookupReader ( pDocidLookup )
, m_tFilterReader ( tValues )
{
if ( pBoundaries )
m_tBoundaries = *pBoundaries;
}
// Populate the cache on first use, then hand out the next rowid chunk.
template <bool ROWID_LIMITS, bool BITMAP>
bool RowidIterator_LookupValues_T<ROWID_LIMITS,BITMAP>::GetNextRowIdBlock ( RowIdBlock_t & dRowIdBlock )
{
	return FillIfFirstTime() && BASE::ReturnRowIdChunk(dRowIdBlock);
}
// Populate the cache on first use, then forward the hint to the cached storage.
template <bool ROWID_LIMITS, bool BITMAP>
bool RowidIterator_LookupValues_T<ROWID_LIMITS,BITMAP>::HintRowID ( RowID_t tRowID )
{
	return FillIfFirstTime() && BASE::HintRowID(tRowID);
}
// Merge-join the sorted filter docids against the sorted docid->rowid lookup,
// collecting matching rowids into the cached storage. Both readers support
// HintDocID() to leapfrog over non-matching ranges.
template <bool ROWID_LIMITS, bool BITMAP>
bool RowidIterator_LookupValues_T<ROWID_LIMITS,BITMAP>::Fill()
{
DocID_t tLookupDocID = 0;
DocID_t tFilterDocID = 0;
RowID_t tLookupRowID = INVALID_ROWID;
bool bHaveFilterDocs = m_tFilterReader.ReadDocID(tFilterDocID);
bool bHaveLookupDocs = m_tLookupReader.Read ( tLookupDocID, tLookupRowID );
m_iProcessed += bHaveFilterDocs ? 1 : 0;
m_iProcessed += bHaveLookupDocs ? 1 : 0;
while ( bHaveFilterDocs && bHaveLookupDocs )
{
if ( tFilterDocID < tLookupDocID )
{
// filter is behind: skip it ahead to the current lookup docid
m_tFilterReader.HintDocID(tLookupDocID);
bHaveFilterDocs = m_tFilterReader.ReadDocID ( tFilterDocID );
}
else if ( tFilterDocID > tLookupDocID )
{
// lookup is behind: skip it ahead to the current filter docid
m_tLookupReader.HintDocID(tFilterDocID);
bHaveLookupDocs = m_tLookupReader.Read ( tLookupDocID, tLookupRowID );
}
else
{
// lookup reader can have duplicates; filter reader can't have duplicates
// advance only the lookup reader
if ( ROWID_LIMITS )
{
if ( tLookupRowID>=m_tBoundaries.m_tMinRowID && tLookupRowID<=m_tBoundaries.m_tMaxRowID )
BASE::Add(tLookupRowID);
}
else
BASE::Add(tLookupRowID);
bHaveLookupDocs = m_tLookupReader.Read ( tLookupDocID, tLookupRowID );
}
m_iProcessed++;
}
return BASE::Finalize();
}
// Run Fill() exactly once, on first access; later calls are no-ops returning true.
template <bool ROWID_LIMITS, bool BITMAP>
bool RowidIterator_LookupValues_T<ROWID_LIMITS,BITMAP>::FillIfFirstTime()
{
	bool bOk = true;
	if ( BASE::m_bFirstTime )
	{
		BASE::m_bFirstTime = false;
		bOk = Fill();
	}

	return bOk;
}
//////////////////////////////////////////////////////////////////////////
// Interface for docid bound checks used while scanning the docid lookup.
// Check() returns {accept, stop}: accept means the docid passes this bound;
// stop means no further docids in scan order can match, so scanning may cease.
class DocIdCheck_i
{
public:
virtual ~DocIdCheck_i() = default;
// optional one-time setup before the scan (e.g. leapfrog to the first candidate)
virtual void Init() {};
// forbid HintDocID() rewinds (used when several checks share one reader)
virtual void DisableRewinding() = 0;
virtual std::pair<bool,bool> Check ( uint64_t uValue ) = 0;
};
// Common state for docid bound checks. Docids are scanned in unsigned 64-bit
// order, so all non-negative int64 docids come first and all negative docids
// (uValue >= MIN_NEG) come last — the Gt*/Lt* subclasses below exploit that
// split to leapfrog or terminate early.
class DocIdCheck_c : public DocIdCheck_i
{
public:
DocIdCheck_c ( uint64_t uValue, std::shared_ptr<LookupReaderIterator_c> & pReader )
: m_uValue ( uValue )
, m_pReader ( pReader )
{}
void DisableRewinding() override { m_bCanRewind = false; }
protected:
// first uint64 value that represents a negative int64 docid
static constexpr uint64_t MIN_NEG = uint64_t(INT64_MIN);
uint64_t m_uValue; // the bound, reinterpreted as uint64
bool m_bCanRewind = true;
bool m_bRewound = false; // at most one HintDocID() rewind per scan
std::shared_ptr<LookupReaderIterator_c> m_pReader;
};
// "docid > (or >=) non-negative bound": matches are the positive docids from the
// bound upwards; once the scan reaches the negative region nothing further can
// match, so it stops.
template <bool EQ>
class GtPos_T : public DocIdCheck_c
{
using DocIdCheck_c::DocIdCheck_c;
public:
void Init() override
{
// leapfrog straight to the first candidate docid
if ( m_bCanRewind )
m_pReader->HintDocID(m_uValue);
}
std::pair<bool,bool> Check ( uint64_t uValue ) override
{
// entered the negative-docid tail of the scan: terminate
if ( uValue >= MIN_NEG )
return { false, true };
if_const ( EQ )
return { uValue >= m_uValue, false };
return { uValue > m_uValue, false };
}
};
// "docid < (or <=) non-negative bound": matches are the positive docids below the
// bound plus ALL negative docids. When the scan passes the bound inside the
// positive region, it rewinds once to MIN_NEG to jump over the non-matching
// positives straight to the negative docids.
template <bool EQ>
class LtPos_T : public DocIdCheck_c
{
using DocIdCheck_c::DocIdCheck_c;
public:
std::pair<bool,bool> Check ( uint64_t uValue ) override
{
if_const ( EQ )
{
if ( uValue <= m_uValue ) return { true, false };
}
else
if ( uValue < m_uValue ) return { true, false };
// positive docid at/above the bound: skip ahead to the negative region (once)
if ( uValue < MIN_NEG )
{
if ( m_bCanRewind && !m_bRewound )
{
m_bRewound = true;
m_pReader->HintDocID(MIN_NEG);
}
return { false, false };
}
// negative docid: always below a non-negative bound
return { true, false };
}
};
// "docid > (or >=) negative bound": ALL positive docids match, plus the negative
// docids from the bound upwards. On first reaching the negative region the scan
// rewinds once to the bound to skip the non-matching negatives below it.
template <bool EQ>
class GtNeg_T : public DocIdCheck_c
{
using DocIdCheck_c::DocIdCheck_c;
public:
inline std::pair<bool,bool> Check ( uint64_t uValue ) override
{
// any positive docid is above a negative bound
if ( uValue < MIN_NEG )
return { true, false };
if ( m_bCanRewind && !m_bRewound && uValue>=MIN_NEG )
{
m_pReader->HintDocID(m_uValue);
m_bRewound = true;
return { false, false };
}
if_const ( EQ )
return { uValue >= m_uValue, false };
return { uValue > m_uValue, false };
}
};
// "docid < (or <=) negative bound": only negative docids below the bound match,
// so Init() leapfrogs straight to the start of the negative region; once past
// the bound the scan terminates.
template <bool EQ>
class LtNeg_T : public DocIdCheck_c
{
using DocIdCheck_c::DocIdCheck_c;
public:
void Init() override
{
if ( m_bCanRewind )
m_pReader->HintDocID(MIN_NEG);
}
inline std::pair<bool,bool> Check ( uint64_t uValue ) override
{
// positive docid: can never be below a negative bound; keep scanning
if ( uValue < MIN_NEG )
return { false, false };
if_const ( EQ )
{
if ( uValue <= m_uValue ) return { true, false };
}
else
if ( uValue < m_uValue ) return { true, false };
// negative docid at/above the bound: no further matches possible
return { false, true };
}
};
// Rowid iterator for a docid range filter: scans the docid->rowid lookup and
// keeps rowids whose docids pass up to two bound checks (min and/or max).
// ROWID_LIMITS additionally restricts results to a rowid window.
template <bool ROWID_LIMITS, bool BITMAP>
class RowidIterator_LookupRange_T : public CachedIterator_T<BITMAP>
{
using BASE = CachedIterator_T<BITMAP>;
public:
RowidIterator_LookupRange_T ( std::shared_ptr<LookupReaderIterator_c> & pReader, DocIdCheck_i * pCheck1, DocIdCheck_i * pCheck2, int64_t iRsetEstimate, DWORD uTotalDocs, const RowIdBoundaries_t * pBoundaries = nullptr );
bool GetNextRowIdBlock ( RowIdBlock_t & dRowIdBlock ) override;
bool HintRowID ( RowID_t tRowID ) override;
int64_t GetNumProcessed() const override { return m_iProcessed; }
void AddDesc ( CSphVector<IteratorDesc_t> & dDesc ) const override { dDesc.Add ( { "id", "DocidIndex" } ); }
protected:
RowIdBoundaries_t m_tBoundaries;
int64_t m_iProcessed {0};
std::shared_ptr<LookupReaderIterator_c> m_pReader;
std::unique_ptr<DocIdCheck_i> m_pCheck1; // lower bound check, may be null
std::unique_ptr<DocIdCheck_i> m_pCheck2; // upper bound check, may be null
virtual bool Fill();
FORCE_INLINE bool FillIfFirstTime();
};
// Takes ownership of both (optional) bound checks; copies the rowid window
// only when supplied.
template <bool ROWID_LIMITS, bool BITMAP>
RowidIterator_LookupRange_T<ROWID_LIMITS,BITMAP>::RowidIterator_LookupRange_T ( std::shared_ptr<LookupReaderIterator_c> & pReader, DocIdCheck_i * pCheck1, DocIdCheck_i * pCheck2, int64_t iRsetEstimate, DWORD uTotalDocs, const RowIdBoundaries_t * pBoundaries )
: BASE ( iRsetEstimate, uTotalDocs )
, m_pReader ( pReader )
, m_pCheck1 ( pCheck1 )
, m_pCheck2 ( pCheck2 )
{
if ( pBoundaries )
m_tBoundaries = *pBoundaries;
}
// Populate the cache on first use, then hand out the next rowid chunk.
template <bool ROWID_LIMITS, bool BITMAP>
bool RowidIterator_LookupRange_T<ROWID_LIMITS,BITMAP>::GetNextRowIdBlock ( RowIdBlock_t & dRowIdBlock )
{
	return FillIfFirstTime() && BASE::ReturnRowIdChunk(dRowIdBlock);
}
// Populate the cache on first use, then forward the hint to the cached storage.
template <bool ROWID_LIMITS, bool BITMAP>
bool RowidIterator_LookupRange_T<ROWID_LIMITS,BITMAP>::HintRowID ( RowID_t tRowID )
{
	return FillIfFirstTime() && BASE::HintRowID(tRowID);
}
// Scan the docid lookup, keeping rowids whose docids pass both bound checks.
// Either check may signal "stop" (no further docids can match), which ends the
// scan; a failed check just skips the current docid.
template <bool ROWID_LIMITS, bool BITMAP>
bool RowidIterator_LookupRange_T<ROWID_LIMITS,BITMAP>::Fill()
{
DocID_t tLookupDocID = 0;
RowID_t tLookupRowID = INVALID_ROWID;
while ( m_pReader->Read ( tLookupDocID, tLookupRowID ) )
{
m_iProcessed++;
if ( m_pCheck1 )
{
auto [ bAccept, bBreak ] = m_pCheck1->Check(tLookupDocID);
if ( bBreak )
break;
if ( !bAccept )
continue;
}
if ( m_pCheck2 )
{
auto [ bAccept, bBreak ] = m_pCheck2->Check(tLookupDocID);
if ( bBreak )
break;
if ( !bAccept )
continue;
}
// passed both bounds; apply the optional rowid window
if ( ROWID_LIMITS )
{
if ( tLookupRowID >= m_tBoundaries.m_tMinRowID && tLookupRowID <= m_tBoundaries.m_tMaxRowID )
BASE::Add(tLookupRowID);
}
else
BASE::Add(tLookupRowID);
}
return BASE::Finalize();
}
// Run the one-time setup (check Init() leapfrogs) and Fill() on first access;
// later calls are no-ops returning true.
template <bool ROWID_LIMITS, bool BITMAP>
bool RowidIterator_LookupRange_T<ROWID_LIMITS,BITMAP>::FillIfFirstTime()
{
	if ( BASE::m_bFirstTime )
	{
		BASE::m_bFirstTime = false;
		if ( m_pCheck1 )
			m_pCheck1->Init();
		if ( m_pCheck2 )
			m_pCheck2->Init();
		return Fill();
	}

	return true;
}
//////////////////////////////////////////////////////////////////////////
// Inverted range iterator: collects rowids whose docids fall OUTSIDE the range,
// i.e. docids accepted by EITHER bound check (below the min or above the max).
template <bool ROWID_LIMITS, bool BITMAP>
class RowidIterator_LookupRangeExclude_T : public RowidIterator_LookupRange_T<ROWID_LIMITS, BITMAP>
{
using BASE = RowidIterator_LookupRange_T<ROWID_LIMITS, BITMAP>;
using BASE::BASE;
protected:
bool Fill() override;
private:
// apply the optional rowid window before storing
void Add ( RowID_t tLookupRowID );
};
// Store a matching rowid, honoring the optional rowid window (pseudo-sharding).
template <bool ROWID_LIMITS, bool BITMAP>
void RowidIterator_LookupRangeExclude_T<ROWID_LIMITS,BITMAP>::Add ( RowID_t tLookupRowID )
{
	if ( ROWID_LIMITS && ( tLookupRowID<BASE::m_tBoundaries.m_tMinRowID || tLookupRowID>BASE::m_tBoundaries.m_tMaxRowID ) )
		return;	// outside the allowed rowid window

	BASE::Add(tLookupRowID);
}
// Scan the docid lookup and collect rowids whose docids pass EITHER bound check
// (i.e. fall outside the excluded range). A check that reports "stop" can never
// match again and is dropped; once both checks are gone, no remaining docid can
// qualify and the scan ends.
// Fix: removed the vestigial pRowID/pRowIdStart/pRowIdMax locals — the old loop
// guard compared a pointer that was never advanced ( pRowID<pRowIdMax was always
// true, since results go into the cached storage via Add(), not this buffer ),
// so it was dead code that only obscured the real termination conditions.
template <bool ROWID_LIMITS, bool BITMAP>
bool RowidIterator_LookupRangeExclude_T<ROWID_LIMITS,BITMAP>::Fill()
{
	DocID_t tLookupDocID = 0;
	RowID_t tLookupRowID = INVALID_ROWID;

	while ( ( BASE::m_pCheck1 || BASE::m_pCheck2 ) && BASE::m_pReader->Read ( tLookupDocID, tLookupRowID ) )
	{
		BASE::m_iProcessed++;

		if ( BASE::m_pCheck1 )
		{
			auto [ bAccept, bBreak ] = BASE::m_pCheck1->Check(tLookupDocID);
			if ( bBreak )
				BASE::m_pCheck1.reset();	// this bound can never accept again

			if ( bAccept )
			{
				Add ( tLookupRowID );
				continue;
			}
		}

		if ( BASE::m_pCheck2 )
		{
			auto [ bAccept, bBreak ] = BASE::m_pCheck2->Check(tLookupDocID);
			if ( bBreak )
				BASE::m_pCheck2.reset();	// this bound can never accept again

			if ( bAccept )
			{
				Add ( tLookupRowID );
				continue;
			}
		}
	}

	return BASE::Finalize();
}
//////////////////////////////////////////////////////////////////////////
// Heuristic: switch to bitmap storage once the result set exceeds 0.1% of all
// docs; below that a plain sorted rowid vector is more compact.
static bool NeedBitmapStorage ( int64_t iRsetSize, DWORD uTotalDocs )
{
	const float fSelectivity = float(iRsetSize) / uTotalDocs;
	return fSelectivity > 0.001f;
}
// Build the lower-bound docid check: pick the positive/negative-bound flavor by
// the sign of iMinValue, and the strict/inclusive flavor by bHasEqualMin.
static DocIdCheck_i * CreateCheckGt ( int64_t iMinValue, bool bHasEqualMin, std::shared_ptr<LookupReaderIterator_c> & pReader )
{
	const uint64_t uVal = uint64_t(iMinValue);
	if ( iMinValue>=0 )
	{
		if ( bHasEqualMin )
			return new GtPos_T<true> ( uVal, pReader );
		return new GtPos_T<false> ( uVal, pReader );
	}

	if ( bHasEqualMin )
		return new GtNeg_T<true> ( uVal, pReader );
	return new GtNeg_T<false> ( uVal, pReader );
}
// Build the upper-bound docid check: pick the positive/negative-bound flavor by
// the sign of iMaxValue, and the strict/inclusive flavor by bHasEqualMax.
static DocIdCheck_i * CreateCheckLt ( int64_t iMaxValue, bool bHasEqualMax, std::shared_ptr<LookupReaderIterator_c> & pReader )
{
	const uint64_t uVal = uint64_t(iMaxValue);
	if ( iMaxValue>=0 )
	{
		if ( bHasEqualMax )
			return new LtPos_T<true> ( uVal, pReader );
		return new LtPos_T<false> ( uVal, pReader );
	}

	if ( bHasEqualMax )
		return new LtNeg_T<true> ( uVal, pReader );
	return new LtNeg_T<false> ( uVal, pReader );
}
#define DECL_CREATEVALUES( _, n, params ) case n: return new RowidIterator_LookupValues_T<!!( n & 2 ), !!( n & 1 )> params;
// instantiate the inclusive-range lookup iterator; four template variants:
// rowid boundaries on/off x bitmap storage on/off
static RowidIterator_i * CreateRowidLookupRange ( std::shared_ptr<LookupReaderIterator_c> & pReader, DocIdCheck_i * pCheck1, DocIdCheck_i * pCheck2, int64_t iRsetEstimate, DWORD uTotalDocs, const RowIdBoundaries_t * pBoundaries, bool bBitmap )
{
	if ( pBoundaries )
	{
		if ( bBitmap )
			return new RowidIterator_LookupRange_T<true, true> ( pReader, pCheck1, pCheck2, iRsetEstimate, uTotalDocs, pBoundaries );
		return new RowidIterator_LookupRange_T<true, false> ( pReader, pCheck1, pCheck2, iRsetEstimate, uTotalDocs, pBoundaries );
	}

	if ( bBitmap )
		return new RowidIterator_LookupRange_T<false, true> ( pReader, pCheck1, pCheck2, iRsetEstimate, uTotalDocs, pBoundaries );
	return new RowidIterator_LookupRange_T<false, false> ( pReader, pCheck1, pCheck2, iRsetEstimate, uTotalDocs, pBoundaries );
}
// instantiate the excluded-range lookup iterator; same 2x2 dispatch as the
// inclusive variant (rowid boundaries x bitmap storage)
static RowidIterator_i * CreateRowidLookupRangeExclude ( std::shared_ptr<LookupReaderIterator_c> & pReader, DocIdCheck_i * pCheck1, DocIdCheck_i * pCheck2, int64_t iRsetEstimate, DWORD uTotalDocs, const RowIdBoundaries_t * pBoundaries, bool bBitmap )
{
	if ( pBoundaries )
	{
		if ( bBitmap )
			return new RowidIterator_LookupRangeExclude_T<true, true> ( pReader, pCheck1, pCheck2, iRsetEstimate, uTotalDocs, pBoundaries );
		return new RowidIterator_LookupRangeExclude_T<true, false> ( pReader, pCheck1, pCheck2, iRsetEstimate, uTotalDocs, pBoundaries );
	}

	if ( bBitmap )
		return new RowidIterator_LookupRangeExclude_T<false, true> ( pReader, pCheck1, pCheck2, iRsetEstimate, uTotalDocs, pBoundaries );
	return new RowidIterator_LookupRangeExclude_T<false, false> ( pReader, pCheck1, pCheck2, iRsetEstimate, uTotalDocs, pBoundaries );
}
// build a rowid iterator that serves a docid filter straight from the docid
// lookup table; returns nullptr when the filter cannot be served this way
static RowidIterator_i * CreateLookupIterator ( const CSphFilterSettings & tFilter, int64_t iRsetEstimate, DWORD uTotalDocs, const BYTE * pDocidLookup, const RowIdBoundaries_t * pBoundaries )
{
	// only filters over the special docid attribute can use the lookup table
	if ( tFilter.m_sAttrName!=sphGetDocidName() )
		return nullptr;

	bool bBitmap = NeedBitmapStorage ( iRsetEstimate, uTotalDocs );

	switch ( tFilter.m_eType )
	{
	case SPH_FILTER_VALUES:
		{
			// DECL_CREATEVALUES expands into the four
			// RowidIterator_LookupValues_T<HAS_BOUNDARIES,BITMAP> instantiations
			int iIndex = !!pBoundaries * 2 + bBitmap;
			switch ( iIndex )
			{
				BOOST_PP_REPEAT ( 4, DECL_CREATEVALUES, ( tFilter.GetValues(), iRsetEstimate, uTotalDocs, pDocidLookup, pBoundaries ) )
				default: assert ( 0 && "Internal error" ); return nullptr;
			}
		}
		break;

	case SPH_FILTER_RANGE:
		{
			auto pReader = std::make_shared<LookupReaderIterator_c>(pDocidLookup);
			if ( tFilter.m_bExclude )
			{
				// excluded range: accept docids BELOW min or ABOVE max,
				// so the checks use inverted comparisons vs the inclusive case
				DocIdCheck_i * pCheck1 = tFilter.m_bOpenLeft ? nullptr : CreateCheckLt ( tFilter.m_iMinValue, !tFilter.m_bHasEqualMin, pReader );
				DocIdCheck_i * pCheck2 = tFilter.m_bOpenRight ? nullptr : CreateCheckGt ( tFilter.m_iMaxValue, !tFilter.m_bHasEqualMax, pReader );
				if ( pCheck1 && pCheck2 )
				{
					// both checks share one reader; presumably rewinding one
					// would invalidate the other — TODO confirm in DocIdCheck_i
					pCheck1->DisableRewinding();
					pCheck2->DisableRewinding();
				}

				return CreateRowidLookupRangeExclude ( pReader, pCheck1, pCheck2, iRsetEstimate, uTotalDocs, pBoundaries, bBitmap );
			}

			// inclusive range: min <(=) docid <(=) max; open bounds get no check
			DocIdCheck_i * pCheck1 = tFilter.m_bOpenLeft ? nullptr : CreateCheckGt ( tFilter.m_iMinValue, tFilter.m_bHasEqualMin, pReader );
			DocIdCheck_i * pCheck2 = tFilter.m_bOpenRight ? nullptr : CreateCheckLt ( tFilter.m_iMaxValue, tFilter.m_bHasEqualMax, pReader );
			return CreateRowidLookupRange ( pReader, pCheck1, pCheck2, iRsetEstimate, uTotalDocs, pBoundaries, bBitmap );
		}

	default:
		break;
	}

	return nullptr;
}
#undef DECL_CREATEVALUES
#undef DECL_CREATERANGEEX
#undef DECL_CREATERANGE
// create lookup-table iterators for every filter marked as LOOKUP in dSIInfo;
// marks the corresponding entries as created so callers don't filter them twice
RowIteratorsWithEstimates_t CreateLookupIterator ( CSphVector<SecondaryIndexInfo_t> & dSIInfo, const CSphVector<CSphFilterSettings> & dFilters, const BYTE * pDocidLookup, uint32_t uTotalDocs )
{
	// an explicit rowid filter (if present) narrows every created iterator
	RowIdBoundaries_t tBoundaries;
	const CSphFilterSettings * pRowIdFilter = GetRowIdFilter ( dFilters, uTotalDocs, tBoundaries );

	RowIteratorsWithEstimates_t dIterators;

	ARRAY_FOREACH ( i, dSIInfo )
	{
		auto & tSIInfo = dSIInfo[i];
		if ( tSIInfo.m_eType!=SecondaryIndexType_e::LOOKUP )
			continue;

		RowidIterator_i * pIterator = CreateLookupIterator ( dFilters[i], tSIInfo.m_iRsetEstimate, uTotalDocs, pDocidLookup, pRowIdFilter ? &tBoundaries : nullptr );
		if ( pIterator )
		{
			dIterators.Add ( { pIterator, tSIInfo.m_iRsetEstimate } );
			tSIInfo.m_bCreated = true;
		}
	}

	return dIterators;
}
//////////////////////////////////////////////////////////////////////////
/// writer for the docid->rowid lookup (.SPT); caller must know the total
/// doc count up front since the checkpoint table is sized from it in Start()
DocidLookupWriter_c::DocidLookupWriter_c ( CSphWriter& tWriter, DWORD nDocs )
	: m_tWriter { tWriter }
	, m_nDocs ( nDocs )
{}
// write the lookup header and reserve space for the max-docid slot and the
// checkpoint table; Finalize() seeks back and patches the real values in
void DocidLookupWriter_c::Start()
{
	m_tWriter.PutDword ( m_nDocs );
	m_tWriter.PutDword ( DOCS_PER_LOOKUP_CHECKPOINT );

	// remember where the placeholder region starts
	m_tCheckpointStart = m_tWriter.GetPos();
	m_tWriter.PutOffset ( 0 );	// max docid, patched on finalize

	const int nCheckpoints = ( m_nDocs + DOCS_PER_LOOKUP_CHECKPOINT - 1 ) / DOCS_PER_LOOKUP_CHECKPOINT;
	m_dCheckpoints.Reset ( nCheckpoints );

	// two offsets per checkpoint: base docid + data offset
	for ( int iCp = 0; iCp<nCheckpoints; iCp++ )
	{
		m_tWriter.PutOffset(0);
		m_tWriter.PutOffset(0);
	}
}
/// append one docid->rowid pair; pairs MUST arrive sorted by docid
/// (compared as unsigned, see the assert)
void DocidLookupWriter_c::AddPair ( const DocidRowidPair_t & tPair )
{
	assert ( (uint64_t)tPair.m_tDocID>=(uint64_t)m_tLastDocID );

	if ( !(m_iProcessed % DOCS_PER_LOOKUP_CHECKPOINT) )
	{
		// first pair of a checkpoint: record its docid/offset in the in-memory
		// table (written to disk by Finalize); readers restart delta decoding here
		m_dCheckpoints[m_iCheckpoint].m_tBaseDocID = tPair.m_tDocID;
		m_dCheckpoints[m_iCheckpoint].m_tOffset = m_tWriter.GetPos();
		++m_iCheckpoint;

		// no need to store docid for 1st entry
	}
	else
	{
		// subsequent pairs: docid is delta-encoded against the previous one
		m_tWriter.ZipOffset ( (uint64_t)tPair.m_tDocID-(uint64_t)m_tLastDocID );
	}

	m_tWriter.PutDword ( tPair.m_tRowID );

	m_tLastDocID = tPair.m_tDocID;
	++m_iProcessed;
}
// flush the data, then rewind to the header and patch in the max docid and
// the checkpoint table reserved by Start(); returns false on write error
bool DocidLookupWriter_c::Finalize ( CSphString & sError )
{
	m_tWriter.Flush();
	m_tWriter.SeekTo ( m_tCheckpointStart );
	m_tWriter.PutOffset ( m_tLastDocID );

	for ( const auto & tCp : m_dCheckpoints )
	{
		m_tWriter.PutOffset ( tCp.m_tBaseDocID );
		m_tWriter.PutOffset ( tCp.m_tOffset );
	}

	m_tWriter.CloseFile();

	if ( !m_tWriter.IsError() )
		return true;

	sError = "error writing .SPT";
	return false;
}
bool WriteDocidLookup ( const CSphString & sFilename, const VecTraits_T<DocidRowidPair_t> & dLookup, CSphString & sError )
{
CSphWriter tfWriter;
if ( !tfWriter.OpenFile ( sFilename, sError ) )
return false;
DocidLookupWriter_c tWriter ( tfWriter, dLookup.GetLength() );
tWriter.Start();
for ( const auto & i : dLookup )
tWriter.AddPair(i);
return tWriter.Finalize ( sError );
}
//////////////////////////////////////////////////////////////////////////
/// reader over an in-memory docid lookup blob; pData may be null (empty lookup)
LookupReader_c::LookupReader_c ( const BYTE * pData )
{
	SetData ( pData );
}
/// (re)attach to a lookup blob and parse its header; layout (as written by
/// DocidLookupWriter_c): DWORD total docs, DWORD docs per checkpoint,
/// offset-sized max docid, then the checkpoint table
void LookupReader_c::SetData ( const BYTE * pData )
{
	m_pData = pData;

	// null data means an empty lookup; keep previous counters untouched
	if ( !pData )
		return;

	const BYTE * p = pData;
	m_nDocs = *(const DWORD*)p;
	p += sizeof(DWORD);
	m_nDocsPerCheckpoint = *(const DWORD*)p;
	p += sizeof(DWORD);
	m_tMaxDocID = *(const DocID_t*)p;
	p += sizeof(DocID_t);
	// checkpoint count is derived, not stored
	m_nCheckpoints = (m_nDocs+m_nDocsPerCheckpoint-1)/m_nDocsPerCheckpoint;
	m_pCheckpoints = (const DocidLookupCheckpoint_t *)p;
}
//////////////////////////////////////////////////////////////////////////
/// iterating flavor of the lookup reader; positions itself at the first checkpoint
LookupReaderIterator_c::LookupReaderIterator_c ( const BYTE * pData )
{
	SetData(pData);
}

void LookupReaderIterator_c::SetData ( const BYTE * pData )
{
	// parse the header, then rewind iteration to the first checkpoint
	LookupReader_c::SetData(pData);
	SetCheckpoint ( m_pCheckpoints );
}
| 23,309
|
C++
|
.cpp
| 710
| 30.4
| 259
| 0.711527
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,952
|
sphinxrt.cpp
|
manticoresoftware_manticoresearch/src/sphinxrt.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "sphinxint.h"
#include "sphinxrt.h"
#include "sphinxpq.h"
#include "sphinxsearch.h"
#include "sphinxsort.h"
#include "sphinxutils.h"
#include "fileutils.h"
#include "sphinxplugin.h"
#include "icu.h"
#include "sphinxqcache.h"
#include "attribute.h"
#include "killlist.h"
#include "histogram.h"
#include "accumulator.h"
#include "indexcheck.h"
#include "indexsettings.h"
#include "indexformat.h"
#include "coroutine.h"
#include "mini_timer.h"
#include "binlog.h"
#include "secondaryindex.h"
#include "docidlookup.h"
#include "columnarrt.h"
#include "columnarmisc.h"
#include "sphinx_alter.h"
#include "chunksearchctx.h"
#include "indexfiles.h"
#include "task_dispatcher.h"
#include "tracer.h"
#include "pseudosharding.h"
#include "knnmisc.h"
#include "jsonsi.h"
#include "std/sys.h"
#include "dict/infix/infix_builder.h"
#include <sys/stat.h>
#include <fcntl.h>
#include <atomic>
#if _WIN32
#include <errno.h>
#else
#include <unistd.h>
#include <sys/time.h>
#endif
#include "attrindex_builder.h"
#include "tokenizer/tokenizer.h"
#include "queryfilter.h"
#include "secondarylib.h"
#if WITH_RE2
#include <string>
#include <re2/re2.h>
#endif
using namespace Threads;
//////////////////////////////////////////////////////////////////////////
#define RTDICT_CHECKPOINT_V5 48
// rt-segments tuning
// rate limit for flushing RAM chunk as disk chunk. Rate applied to rt_mem_limit, calculated value used as limit, when we start flushing
constexpr double INITIAL_SAVE_RATE_LIMIT = 0.5; ///< we start rate limiting from this value.
constexpr double MIN_SAVE_RATE_LIMIT = 0.333333; ///< minimal rate limit. Calculated value will never be less that that bound
constexpr double MAX_SAVE_RATE_LIMIT = 0.95; ///< maximal rate limit. It most probably may be reached with very low insertion rate
constexpr double SAVE_RATE_LIMIT_EMERGENCY_STEP = 0.05; ///< emergency back-off.
constexpr int SIMULTANEOUS_SAVE_LIMIT = 2; ///< how many save ops we allow a time
constexpr int MAX_SEGMENTS = 32;
constexpr int MAX_PROGRESSION_SEGMENT = 8;
constexpr int64_t MAX_SEGMENT_VECTOR_LEN = INT_MAX;
constexpr int MAX_TOLERATE_LOAD_SEGMENTS = MAX_SEGMENTS * ( SIMULTANEOUS_SAVE_LIMIT + 1 ); ///< if on load N of segments exceedes this value - perform safe loading
//////////////////////////////////////////////////////////////////////////
#ifndef NDEBUG
#define Verify(_expr) assert(_expr)
#else
#define Verify(_expr) _expr
#endif
#define LOG_LEVEL_RTRDIAG false
#define LOG_LEVEL_RTDDIAG false
#define LOG_LEVEL_RTSAVEDIAG false
#define LOG_LEVEL_RTDIAGV false
#define LOG_LEVEL_RTDIAGVV false
#define LOG_LEVEL_DEBUGV false
#define LOG_COMPONENT_RTSEG __LINE__ << " " << Coro::CurrentScheduler()->Name() << " "
// used in start/merge RAM segments
#define RTRLOG LOGINFO ( RTRDIAG, RTSEG )
// used when logging disk save/optimize
#define RTDLOG LOGINFO ( RTDDIAG, RTSEG )
// ops for save RAM segments as disk chunk
#define RTSAVELOG LOGINFO ( RTSAVEDIAG, RTSEG )
#define RTLOGV LOGINFO ( RTDIAGV, RTSEG )
#define RTLOGVV LOGINFO ( RTDIAGVV, RTSEG )
static bool LOG_LEVEL_RTSPLIT_QUERY = val_from_env ( "MANTICORE_LOG_RTSPLIT_QUERY", false ); // verbose logging split query events, ruled by this env variable
#define LOG_COMPONENT_RTQUERYINFO __LINE__ << " "
#define RTQUERYINFO LOGINFO ( RTSPLIT_QUERY, RTQUERYINFO )
//////////////////////////////////////////////////////////////////////////
// GLOBALS
//////////////////////////////////////////////////////////////////////////
/// check for concurrent changes during binlog replay (used only in asserts)
static auto& g_bRTChangesAllowed = RTChangesAllowed ();
// optimize mode for disk chunks merge fixme! retire?
static bool g_bProgressiveMerge = true;
//////////////////////////////////////////////////////////////////////////
// accessor for the process-wide "RT changes allowed" flag (used by asserts
// during binlog replay); function-local static gives a single shared instance
volatile bool &RTChangesAllowed () noexcept
{
	static volatile bool g_bAllowed = false;
	return g_bAllowed;
}
// mutable process-wide multiplier applied on top of the auto-optimize cutoff
volatile int &AutoOptimizeCutoffMultiplier() noexcept
{
	static int iAutoOptimizeCutoffMultiplier = 1;
	return iAutoOptimizeCutoffMultiplier;
}

// base auto-optimize cutoff; computed once as 2x the number of logical CPUs
volatile int AutoOptimizeCutoff() noexcept
{
	static int iAutoOptimizeCutoff = GetNumLogicalCPUs() * 2;
	return iAutoOptimizeCutoff;
}
//////////////////////////////////////////////////////////////////////////
// Variable Length Byte (VLB) encoding
// store int variable in as much bytes as actually needed to represent it
// append a VLB-encoded (little-endian) integer to the output byte vector,
// one byte at a time via the lambda sink
template < typename T, typename P >
static inline void ZipT_LE ( CSphVector < BYTE, P > & dOut, T uValue )
{
	ZipValueLE ( [&dOut] ( BYTE b ) { dOut.Add ( b ); }, uValue );
}
#define SPH_MAX_KEYWORD_LEN (3*SPH_MAX_WORD_LEN+4)
STATIC_ASSERT ( SPH_MAX_KEYWORD_LEN<255, MAX_KEYWORD_LEN_SHOULD_FITS_BYTE );
// Variable Length Byte (VLB) decoding
// decode a VLB-encoded (little-endian) integer, advancing pIn past it;
// out-parameter flavor
template < typename T >
static inline void UnzipT_LE ( T * pValue, const BYTE *& pIn )
{
	*pValue = UnzipValueLE<T> ( [&pIn]() mutable { return *pIn++; } );
}

// same, return-value flavor
template < typename T >
static inline T UnzipT_LE ( const BYTE *& pIn )
{
	return UnzipValueLE<T> ( [&pIn]() mutable { return *pIn++; } );
}
// Variable Length Byte (VLB) skipping (BE/LE agnostic)
static inline void SkipZipped ( const BYTE *& pIn )
{
while ( *pIn & 0x80U )
++pIn;
++pIn; // jump over last one
}
#define ZipDword ZipT_LE<DWORD>
#define ZipQword ZipT_LE<uint64_t>
#define UnzipDword UnzipT_LE<DWORD>
#define UnzipQword UnzipT_LE<uint64_t>
#define ZipDocid ZipQword
#define ZipWordid ZipQword
#define UnzipWordid UnzipQword
//////////////////////////////////////////////////////////////////////////
/// prepare per-document insert storage sized to the given schema; docid access
/// is set up either as a columnar ordinal or as a row locator
InsertDocData_c::InsertDocData_c ( const ISphSchema & tSchema )
{
	m_tDoc.Reset ( tSchema.GetRowSize() );
	m_dFields.Resize ( tSchema.GetFieldsCount() );

	const CSphColumnInfo * pId = tSchema.GetAttr ( sphGetDocidName() );
	assert(pId);

	if ( pId->IsColumnar() )
	{
		// locate the docid's ordinal by scanning the schema
		// NOTE(review): iColumnar advances for EVERY attribute, not only
		// columnar ones — this assumes attribute order matches columnar
		// order here; confirm against schema layout
		int iColumnar = 0;
		for ( int i = 0; i < tSchema.GetAttrsCount(); i++ )
		{
			if ( tSchema.GetAttr(i).m_sName == sphGetDocidName() )
				m_iColumnarID = iColumnar;

			iColumnar++;
		}

		m_dColumnarAttrs.Resize(iColumnar);
	}
	else
	{
		// row-wise docid: keep a dynamic locator since we write into our own row
		m_tDocIDLocator = pId->m_tLocator;
		m_tDocIDLocator.m_bDynamic = true;
	}
}
// store the docid into whichever storage the ctor selected:
// the columnar attribute slot or the plain row
void InsertDocData_c::SetID ( SphAttr_t tDocID )
{
	if ( m_iColumnarID>=0 )
		m_dColumnarAttrs[m_iColumnarID] = tDocID;
	else
		m_tDoc.SetAttr ( m_tDocIDLocator, tDocID );
}
// fetch the docid from the columnar slot or the plain row (inverse of SetID)
SphAttr_t InsertDocData_c::GetID() const
{
	return ( m_iColumnarID>=0 ) ? m_dColumnarAttrs[m_iColumnarID] : m_tDoc.GetAttr(m_tDocIDLocator);
}
// append an MVA length entry, packing the "default value" flag into the high bit
void InsertDocData_c::AddMVALength ( int iLength, bool bDefault )
{
	m_dMvas.Add ( int64_t(iLength) | ( bDefault ? DEFAULT_FLAG : 0 ) );
}
std::pair<int, bool> InsertDocData_c::ReadMVALength ( const int64_t * & pMVA )
{
bool bDefault = !!(*pMVA & DEFAULT_FLAG);
int iLength = int(*pMVA & ~DEFAULT_FLAG);
pMVA++;
return { iLength, bDefault };
}
/// rebuild m_dMvas from the parser's scratch layout, restoring attribute order
void InsertDocData_c::FixParsedMVAs ( const CSphVector<int64_t> & dParsed, int iCount )
{
	if ( !iCount )
		return;

	// dParsed:
	// 0 - iCount elements: offset to MVA values with leading MVA element count
	// Could be not in right order
	ResetMVAs();
	for ( int i=0; i<iCount; ++i )
	{
		int iOff = dParsed[i];
		if ( !iOff )
		{
			// zero offset == empty MVA for this attribute
			AddMVALength(0);
			continue;
		}

		// copy count + values verbatim into the flat output vector
		DWORD uMvaCount = dParsed[iOff];
		int64_t * pMva = m_dMvas.AddN ( uMvaCount + 1 );
		*pMva++ = uMvaCount;
		memcpy ( pMva, dParsed.Begin() + iOff + 1, sizeof(m_dMvas[0]) * uMvaCount );
	}
}
//////////////////////////////////////////////////////////////////////////
/// RAM segment: all rows start alive (m_tAliveRows == uDocs, empty dead-row map)
RtSegment_t::RtSegment_t ( DWORD uDocs, const ISphSchema& tSchema )
	: m_uRows ( uDocs )
	, m_tAliveRows { uDocs }
	, m_tDeadRowMap ( uDocs )
	, m_tSchema { tSchema }
{
}

RtSegment_t::~RtSegment_t ()
{
	// give the segment's RAM back to the index-wide counter (if attached)
	if ( m_pRAMCounter )
		FixupRAMCounter ( -GetUsedRam() );
}
// recalculate this segment's RAM footprint across all owned storages and
// propagate the delta from the previously cached value to the shared counter
void RtSegment_t::UpdateUsedRam() const NO_THREAD_SAFETY_ANALYSIS
{
	int64_t iTotal = 0;
	iTotal += m_dWords.AllocatedBytes();
	iTotal += m_dDocs.AllocatedBytes();
	iTotal += m_dHits.AllocatedBytes();
	iTotal += m_dBlobs.AllocatedBytes();
	iTotal += m_dKeywordCheckpoints.AllocatedBytes();
	iTotal += m_dRows.AllocatedBytes();
	iTotal += m_dInfixFilterCP.AllocatedBytes();
	if ( m_pDocstore )
		iTotal += m_pDocstore->AllocatedBytes();
	if ( m_pColumnar )
		iTotal += m_pColumnar->AllocatedBytes();

	const int64_t iPrev = m_iUsedRam;
	m_iUsedRam = iTotal;
	FixupRAMCounter ( iTotal - iPrev );
}
// cached RAM usage; lazily computed on first access
int64_t RtSegment_t::GetUsedRam() const
{
	if ( !m_iUsedRam )
		UpdateUsedRam();

	return m_iUsedRam;
}

// apply a RAM-usage delta to the index-wide counter, if one is attached
void RtSegment_t::FixupRAMCounter ( int64_t iDelta ) const
{
	if ( !m_pRAMCounter )
		return;

	m_pRAMCounter->fetch_add ( iDelta, std::memory_order_relaxed );
	TRACE_COUNTER ( "mem", perfetto::CounterTrack ( "RAMCounter", "bytes" ).set_unit_multiplier ( 1024 ), m_pRAMCounter->load ( std::memory_order_relaxed ) >> 10 );
}

// merge weight of a segment is simply its row count
DWORD RtSegment_t::GetMergeFactor() const
{
	return m_uRows;
}

// rowitems per row, derived from total row storage size
int RtSegment_t::GetStride () const NO_THREAD_SAFETY_ANALYSIS
{
	return int ( m_dRows.GetLength() / m_uRows );
}
// resolve a docid to its row data, or nullptr if unknown or killed
const CSphRowitem * RtSegment_t::FindAliveRow ( DocID_t tDocid ) const
{
	RowID_t tRowID = GetRowidByDocid(tDocid);
	if ( tRowID==INVALID_ROWID || m_tDeadRowMap.IsSet(tRowID) )
		return nullptr;

	return GetDocinfoByRowID(tRowID);
}

// raw row access by rowid; nullptr when the segment stores no row data (columnar-only)
const CSphRowitem * RtSegment_t::GetDocinfoByRowID ( RowID_t tRowID ) const NO_THREAD_SAFETY_ANALYSIS
{
	return m_dRows.GetLength() ? &m_dRows[tRowID*GetStride()] : nullptr;
}

// docid -> rowid, treating killed rows as missing
RowID_t RtSegment_t::GetAliveRowidByDocid ( DocID_t tDocID ) const
{
	RowID_t* pRowID = m_tDocIDtoRowID.Find ( tDocID );
	if ( !pRowID || m_tDeadRowMap.IsSet ( *pRowID ) )
		return INVALID_ROWID;

	return *pRowID;
}

// docid -> rowid, ignoring the dead-row map
RowID_t RtSegment_t::GetRowidByDocid ( DocID_t tDocID ) const
{
	RowID_t * pRowID = m_tDocIDtoRowID.Find(tDocID);
	return pRowID ? *pRowID : INVALID_ROWID;
}

// kill one document; returns 1 if it was alive before, 0 otherwise
int RtSegment_t::Kill ( DocID_t tDocID )
{
	if ( m_tDeadRowMap.Set ( GetRowidByDocid ( tDocID ) ) )
	{
		assert ( m_tAliveRows>0 );
		m_tAliveRows.fetch_sub ( 1, std::memory_order_relaxed );

		// on runtime killing expected only from single fiber, so no mutex here required as no race for pKillHook
		KillHook ( tDocID );
		return 1;
	}

	return 0;
}

// kill a batch of documents; returns the number actually killed
int RtSegment_t::KillMulti ( const VecTraits_T<DocID_t> & dKlist )
{
	int iTotalKilled = 0;

	// fixme: implement more efficient batch killer
	for ( auto iDocID : dKlist )
		iTotalKilled += Kill ( iDocID );

	return iTotalKilled;
}
// create the segment's docstore; optionally wire up field config from the schema
void RtSegment_t::SetupDocstore ( const CSphSchema * pSchema )
{
	assert ( !m_pDocstore );
	m_pDocstore = CreateDocstoreRT();
	assert ( m_pDocstore );
	if ( pSchema )
		SetupDocstoreFields ( *m_pDocstore, *pSchema );
}

// (re)build the docid->rowid hash; reads docids either from rows or columnar storage
void RtSegment_t::BuildDocID2RowIDMap ( const CSphSchema & tSchema )
{
	m_tDocIDtoRowID.Reset(m_uRows);

	if ( !tSchema.GetAttr(0).IsColumnar() )
	{
		int iStride = GetStride();
		RowID_t tRowID = 0;

		FakeRL_t _ {m_tLock}; // no need true lock as the func is in game during build/merge when segment is not yet published
		for ( int i=0; i<m_dRows.GetLength(); i+=iStride )
			m_tDocIDtoRowID.Add ( sphGetDocID ( &m_dRows[i] ), tRowID++ );
	}
	else
	{
		// columnar docid: iterate via a columnar iterator instead of raw rows
		std::string sError;
		auto pIt = CreateColumnarIterator ( m_pColumnar.get(), sphGetDocidName(), sError );
		assert ( pIt );

		for ( RowID_t tRowID = 0; tRowID<m_uRows; tRowID++ )
			m_tDocIDtoRowID.Add ( pIt->Get(tRowID), tRowID );
	}
}
//////////////////////////////////////////////////////////////////////////
/// streams RtDoc_t entries into a segment's doclist as VLB-zipped values;
/// rowids are delta-encoded against the previous document
class RtDocWriter_c
{
	CSphTightVector<BYTE> & m_dDocs;
	RowID_t m_tLastRowID {INVALID_ROWID};	// delta base for rowid encoding

public:
	explicit RtDocWriter_c ( CSphTightVector<BYTE> & dDocs )
		: m_dDocs ( dDocs )
	{}

	inline void operator<< ( const RtDoc_t & tDoc)
	{
		// worst-case reserve for all zipped fields of one entry
		m_dDocs.ReserveGap ( 5 + 5 * sizeof ( DWORD ) );
		ZipDword ( m_dDocs, tDoc.m_tRowID - std::exchange ( m_tLastRowID, tDoc.m_tRowID ) );
		ZipDword ( m_dDocs, tDoc.m_uDocFields );
		ZipDword ( m_dDocs, tDoc.m_uHits );
		if ( tDoc.m_uHits == 1 )
		{
			// single-hit docs inline the hit, split into 24-bit + remainder parts
			ZipDword ( m_dDocs, tDoc.m_uHit & 0xffffffUL );
			ZipDword ( m_dDocs, tDoc.m_uHit >> 24 );
		} else
			ZipDword ( m_dDocs, tDoc.m_uHit );
	}

	// current write position == offset of the next doc entry
	DWORD WriterPos () const
	{
		return m_dDocs.GetLength();
	}

	// restart delta encoding (e.g. at a new word)
	void ZipRestart ()
	{
		m_tLastRowID = INVALID_ROWID;
	}
};
/// reader positioned at the doclist of one word within a RAM segment
RtDocReader_c::RtDocReader_c ( const RtSegment_t * pSeg, const RtWord_t & tWord )
{
	Init ( pSeg, tWord );
}

void RtDocReader_c::Init ( const RtSegment_t* pSeg, const RtWord_t& tWord )
{
	// tWord.m_uDoc is the byte offset of the word's doclist; null when segment has no docs
	m_pDocs = ( pSeg->m_dDocs.begin() ? pSeg->m_dDocs.begin() + tWord.m_uDoc : nullptr );
	m_iLeft = tWord.m_uDocs;
	m_tDoc.m_tRowID = INVALID_ROWID;
}
/// decode the next doc entry (mirror of RtDocWriter_c::operator<<);
/// returns false when the doclist is exhausted
bool RtDocReader_c::UnzipDoc ()
{
	if ( !m_iLeft || !m_pDocs )
		return false;
	const BYTE* pIn = m_pDocs;

	// rowid is delta-encoded against the previous entry
	m_tDoc.m_tRowID += UnzipDword ( pIn );
	UnzipDword ( &m_tDoc.m_uDocFields, pIn );
	UnzipDword ( &m_tDoc.m_uHits, pIn );
	if ( m_tDoc.m_uHits == 1 )
	{
		// single-hit docs store the inlined hit as 24-bit low part + remainder
		auto a = UnzipDword ( pIn );
		auto b = UnzipDword ( pIn );
		m_tDoc.m_uHit = a + ( b << 24 );
	} else
		UnzipDword ( &m_tDoc.m_uHit, pIn );
	m_pDocs = pIn;
	--m_iLeft;
	return true;
}
/// streams RtWord_t entries into a segment's dictionary; wordids/keywords and
/// doclist offsets are delta-encoded, with a checkpoint every m_iWordsCheckpoint
/// words where delta encoding restarts
class RtWordWriter_c
{
	CSphTightVector<BYTE> & m_dWords;
	CSphVector<RtWordCheckpoint_t> & m_dCheckpoints;
	CSphVector<BYTE> & m_dKeywordCheckpoints;

	CSphKeywordDeltaWriter m_tLastKeyword;	// keyword delta state (dict=keywords mode)
	SphWordID_t m_uLastWordID;				// wordid delta base (dict=crc mode)
	DWORD m_uLastDoc;						// doclist-offset delta base
	int m_iWords;							// words since last checkpoint
	bool m_bKeywordDict;
	int m_iWordsCheckpoint;
	const ESphHitless m_eHitlessMode = SPH_HITLESS_NONE;

public:
	RtWordWriter_c ( CSphTightVector<BYTE> & dWords, CSphVector<RtWordCheckpoint_t> & dCheckpoints,
			CSphVector<BYTE> & dKeywordCheckpoints, bool bKeywordDict, int iWordsCheckpoint, ESphHitless eHitlessMode )
		: m_dWords ( dWords )
		, m_dCheckpoints ( dCheckpoints )
		, m_dKeywordCheckpoints ( dKeywordCheckpoints )
		, m_uLastWordID ( 0 )
		, m_uLastDoc ( 0 )
		, m_iWords ( 0 )
		, m_bKeywordDict ( bKeywordDict )
		, m_iWordsCheckpoint ( iWordsCheckpoint )
		, m_eHitlessMode ( eHitlessMode )
	{
		assert ( dWords.IsEmpty() );
		assert ( dCheckpoints.IsEmpty() );
		assert ( dKeywordCheckpoints.IsEmpty() );
	}

	void operator<< ( const RtWord_t& tWord ) // we never chain calls, so void as return type is ok.
	{
		if ( ++m_iWords==m_iWordsCheckpoint )
		{
			// start a new checkpoint: remember either the wordid or the full
			// keyword text, plus the offset where this word's entry begins
			RtWordCheckpoint_t & tCheckpoint = m_dCheckpoints.Add();
			if ( !m_bKeywordDict )
				tCheckpoint.m_uWordID = tWord.m_uWordID;
			else
			{
				int iLen = tWord.m_sWord[0];
				assert ( iLen && iLen-1<SPH_MAX_KEYWORD_LEN );
				tCheckpoint.m_uWordID = sphPutBytes ( &m_dKeywordCheckpoints, tWord.m_sWord+1, iLen+1 );
				m_dKeywordCheckpoints.Last() = '\0'; // checkpoint is NULL terminating string

				// reset keywords delta encoding
				m_tLastKeyword.Reset();
			}
			tCheckpoint.m_iOffset = m_dWords.GetLength();

			// restart all delta bases at the checkpoint boundary
			m_uLastWordID = 0;
			m_uLastDoc = 0;
			m_iWords = 1;
		}

		if ( !m_bKeywordDict )
			ZipWordid ( m_dWords, tWord.m_uWordID - m_uLastWordID );
		else
			m_tLastKeyword.PutDelta ( *this, tWord.m_sWord+1, tWord.m_sWord[0] );

		m_dWords.ReserveGap ( 3+3*sizeof(DWORD) );
		DWORD uDocs = tWord.m_uDocs;
		// flag hitless words in the stored doc count (SPH_HITLESS_SOME mode)
		if ( !tWord.m_bHasHitlist && m_eHitlessMode==SPH_HITLESS_SOME )
			uDocs |= HITLESS_DOC_FLAG;

		ZipDword ( m_dWords, uDocs );
		ZipDword ( m_dWords, tWord.m_uHits );
		ZipDword ( m_dWords, tWord.m_uDoc - m_uLastDoc );
		m_uLastWordID = tWord.m_uWordID;
		m_uLastDoc = tWord.m_uDoc;
	}

	// sink used by CSphKeywordDeltaWriter to emit raw keyword bytes
	void PutBytes ( const BYTE * pData, int iLen ) const
	{
		sphPutBytes ( &m_dWords, pData, iLen );
	}
};
/// dictionary reader over one RAM segment; decodes entries written by RtWordWriter_c
RtWordReader_c::RtWordReader_c ( const RtSegment_t * pSeg, bool bWordDict, int iWordsCheckpoint, ESphHitless eHitlessMode )
	: m_bWordDict ( bWordDict )
	, m_iWordsCheckpoint ( iWordsCheckpoint )
	, m_eHitlessMode ( eHitlessMode )
{
	m_tWord.m_uWordID = 0;
	Reset ( pSeg );
	if ( bWordDict )
		m_tWord.m_sWord = m_tPackedWord;	// expose the decode buffer as the current keyword
}

// rewind to the start of the segment's dictionary
void RtWordReader_c::Reset ( const RtSegment_t * pSeg )
{
	m_pCur = pSeg->m_dWords.Begin();
	m_pMax = m_pCur + pSeg->m_dWords.GetLength();

	m_tWord.m_uDoc = 0;
	m_iWords = 0;
}
/// decode the next dictionary entry; returns nullptr at the end.
/// mirrors RtWordWriter_c: delta bases restart at every checkpoint
const RtWord_t* RtWordReader_c::UnzipWord ()
{
	if (m_pCur >= m_pMax)
		return nullptr;

	if ( ++m_iWords==m_iWordsCheckpoint )
	{
		m_tWord.m_uDoc = 0;
		m_iWords = 1;
		++m_iCheckpoint;
		if ( !m_bWordDict )
			m_tWord.m_uWordID = 0;
	}
	const BYTE * pIn = m_pCur;
	if ( m_bWordDict )
	{
		// 1dddmmmm -> delta, match (packed into 1 byte)
		// 0ddddddd mmmmmmmm -> delta, match (occupy 2 bytes)
		// then delta bytes of symbols
		// overwrites previous token: delta symbols starting from match. At the end (delta+match) put 0-terminator
		BYTE iMatch, iDelta, uPacked;
		uPacked = *pIn++;
		if ( uPacked & 0x80 )
		{
			iDelta = ( ( uPacked>>4 ) & 7 ) + 1;
			iMatch = uPacked & 15;
		} else
		{
			iDelta = uPacked & 127;
			iMatch = *pIn++;
		}
		m_tPackedWord[0] = iMatch+iDelta;	// pascal-style length byte
		memcpy ( m_tPackedWord+1+iMatch, pIn, iDelta );
		m_tPackedWord[1+m_tPackedWord[0]] = 0;
		pIn += iDelta;
	} else
		m_tWord.m_uWordID += UnzipWordid ( pIn );

	UnzipDword ( &m_tWord.m_uDocs, pIn );
	UnzipDword ( &m_tWord.m_uHits, pIn );
	m_tWord.m_uDoc += UnzipDword ( pIn );

	// HITLESS_DOC_FLAG in the doc count marks words stored without hitlists
	m_tWord.m_bHasHitlist = ( m_eHitlessMode==SPH_HITLESS_NONE || ( m_eHitlessMode==SPH_HITLESS_SOME && !( m_tWord.m_uDocs & HITLESS_DOC_FLAG ) ) );
	m_tWord.m_uDocs = ( m_eHitlessMode==SPH_HITLESS_NONE ? m_tWord.m_uDocs : ( m_tWord.m_uDocs & HITLESS_DOC_MASK ) );
	m_pCur = pIn;
	return &m_tWord;
}
/// streams hit positions into a segment's hitlist as delta-zipped DWORDs
class RtHitWriter_c
{
	CSphTightVector<BYTE>& m_dHits;
	DWORD m_uLastHit = 0;	// delta base

public:
	explicit RtHitWriter_c ( CSphTightVector<BYTE>& dHits )
		: m_dHits ( dHits )
	{}

	inline void operator<< ( DWORD uValue )
	{
		ZipDword ( m_dHits, uValue - std::exchange ( m_uLastHit, uValue ) );
	}

	// restart delta encoding (e.g. at a new document)
	void ZipRestart()
	{
		m_uLastHit = 0;
	}

	// current write position == offset of the next hitlist
	DWORD WriterPos() const
	{
		return m_dHits.GetLength();
	}
};
/// reader positioned at the hitlist of one document
RtHitReader_c::RtHitReader_c ( const RtSegment_t& dSeg, const RtDoc_t& dDoc )
	: m_pCur ( dSeg.m_dHits.begin() + dDoc.m_uHit)
	, m_uLeft ( dDoc.m_uHits )
	, m_uValue ( EMPTY_HIT )
{}

// reposition to another document's hitlist within a segment
void RtHitReader_c::Seek ( const RtSegment_t& dSeg, const RtDoc_t& dDoc )
{
	Seek ( dSeg.m_dHits.begin() + dDoc.m_uHit, dDoc.m_uHits );
}

// reposition to a raw hitlist blob
void RtHitReader_c::Seek ( const BYTE* pHits, DWORD uHits )
{
	m_pCur = pHits;
	m_uLeft = uHits;
	m_uValue = EMPTY_HIT;
}

// decode the next delta-encoded hit; EMPTY_HIT signals end of list
DWORD RtHitReader_c::UnzipHit ()
{
	if ( !m_pCur || !m_uLeft )
		return EMPTY_HIT;

	m_uValue += UnzipDword ( m_pCur );
	--m_uLeft;
	return m_uValue;
}
// locate the raw (still zipped) hitlist bytes of one document: skip over
// tDoc.m_uHits VLB values to find where the blob ends
ByteBlob_t GetHitsBlob ( const RtSegment_t& tSeg, const RtDoc_t& tDoc )
{
	const BYTE* pStart = &tSeg.m_dHits[tDoc.m_uHit];
	const BYTE* pCur = pStart;
	for ( DWORD uHit = 0; uHit<tDoc.m_uHits; ++uHit )
		SkipZipped ( pCur );
	return { pStart, pCur - pStart };
}
//////////////////////////////////////////////////////////////////////////
/// forward ref
class RtIndex_c;
/// per-chunk source statistics plus per-field token length totals
struct ChunkStats_t
{
	CSphSourceStats m_Stats;
	CSphFixedVector<int64_t> m_dFieldLens { SPH_MAX_FIELDS };

	void Init ( const CSphSourceStats & s, const CSphFixedVector<int64_t> & dLens )
	{
		assert ( m_dFieldLens.GetLength() == dLens.GetLength() );
		m_Stats = s;
		ARRAY_FOREACH ( i, dLens )
			m_dFieldLens[i] = dLens[i];
	}

	ChunkStats_t () = default;
	ChunkStats_t ( const CSphSourceStats & s, const CSphFixedVector<int64_t> & dLens )
	{
		Init ( s, dLens );
	}
};

/// shared state for merging attributes of RAM segments: output rowid counter
/// and (optional) columnar builder are owned by the caller
struct RtAttrMergeContext_t
{
	int							m_iNumBlobs;
	RowID_t &					m_tResultRowID;		// advanced as rows are emitted
	columnar::Builder_i *		m_pColumnarBuilder;

	RtAttrMergeContext_t ( int iNumBlobs, RowID_t & tResultRowID, columnar::Builder_i * pColumnarBuilder )
		: m_iNumBlobs ( iNumBlobs )
		, m_tResultRowID ( tResultRowID )
		, m_pColumnarBuilder ( pColumnarBuilder )
	{}
};
struct RtQword_t;
struct SaveDiskDataContext_t;
// kind of 'mini served_desc' inside index - manages state of one disk chunk
/// refcounted wrapper around one disk chunk's CSphIndex; owns the index and
/// optionally unlinks its files on destruction
class DiskChunk_c final : public ISphRefcountedMT
{
public:
	mutable std::atomic<bool> m_bOptimizing { false }; // to protect from simultaneous optimizing one and same chunk
	mutable bool m_bFinallyUnlink = false; // unlink index files on destroy
	mutable Threads::Coro::RWLock_c m_tLock; // fine-grain lock between update and merge (optimize)
	mutable std::atomic<int> m_iPendingUpdates {0}; // if right now some updates pending

	// factory helpers; null index yields a null chunk pointer
	inline static CSphRefcountedPtr<const DiskChunk_c> make ( CSphIndex* pIndex ) { return CSphRefcountedPtr<const DiskChunk_c> { pIndex ? new DiskChunk_c(pIndex) : nullptr }; }
	inline static CSphRefcountedPtr<const DiskChunk_c> make ( std::unique_ptr<CSphIndex> pIndex ) { return CSphRefcountedPtr<const DiskChunk_c> { pIndex ? new DiskChunk_c(std::move(pIndex)) : nullptr }; }

	explicit operator CSphIndex* () const { return m_pIndex; }
	CSphIndex & Idx() { return *m_pIndex; }
	CSphIndex & CastIdx () const { return *const_cast<CSphIndex *>(m_pIndex); } // const breakage!
	const CSphIndex & Cidx() const { return *m_pIndex; }

protected:
	~DiskChunk_c() final
	{
		if ( !m_pIndex )
			return;

		// capture the filebase before deleting the index, then optionally
		// remove the chunk's files from disk
		CSphString sDeleted = m_pIndex->GetFilebase ();
		SafeDelete( m_pIndex );
		if ( m_bFinallyUnlink )
			sphUnlinkIndex ( sDeleted.cstr (), true );
	}

private:
	CSphIndex * m_pIndex;

	DiskChunk_c ( CSphIndex * pIndex ) : m_pIndex ( pIndex ) {}
	DiskChunk_c ( std::unique_ptr<CSphIndex> pIndex ) : m_pIndex ( pIndex.release() ) {}
};
using DiskChunkRefPtr_t = CSphRefcountedPtr<DiskChunk_c>;
using ConstDiskChunkRefPtr_t = CSphRefcountedPtr<const DiskChunk_c>;
using ConstRtSegmentSlice_t = VecTraits_T<ConstRtSegmentRefPtf_t>;
using DiskChunkSlice_t = VecTraits_T<DiskChunkRefPtr_t>;
/// refcounted vector of chunk pointers; shared immutable snapshots of the
/// chunk/segment sets are built from these
template <typename CHUNK>
class RefCountedVec_T final : public ISphRefcountedMT, public LazyVector_T<CHUNK>
{
protected:
	~RefCountedVec_T () final = default;

public:
	RefCountedVec_T() = default;
	using BASE = LazyVector_T<CHUNK>;
};

using DiskChunkVec_c = RefCountedVec_T<ConstDiskChunkRefPtr_t>;
using RtSegVec_c = RefCountedVec_T<ConstRtSegmentRefPtf_t>;

using DiskChunkVecRefPtr_t = CSphRefcountedPtr<DiskChunkVec_c>;
using RtSegVecRefPtr_t = CSphRefcountedPtr<RtSegVec_c>;
using ConstDiskChunkVecRefPtr_t = CSphRefcountedPtr<const DiskChunkVec_c>;
using ConstRtSegVecRefPtr_t = CSphRefcountedPtr<const RtSegVec_c>;

// copy a plain slice of chunk pointers into a fresh refcounted vector
template<typename CHUNK>
CSphRefcountedPtr<RefCountedVec_T<CHUNK>> SliceToVec ( const VecTraits_T<CHUNK> & dSegments )
{
	CSphRefcountedPtr<RefCountedVec_T<CHUNK>> dResult { new RefCountedVec_T<CHUNK> };
	dResult->Reserve ( dSegments.GetLength () );
	for ( const auto & dSeg : dSegments )
		dResult->Add ( dSeg );
	return dResult;
}

/// one consistent snapshot of the index dataset: disk chunks + RAM segments
struct ConstRtData {
	ConstDiskChunkVecRefPtr_t m_pChunks;
	ConstRtSegVecRefPtr_t m_pSegs;
};
//using MutableRtData = std::pair<DiskChunkVecRefPtr_t, RtSegVecRefPtr_t>;
/*
class FiberPool_c
{
struct HelperFiber_t
{
CSphString m_sName;
Threads::RoledSchedulerSharedPtr_t m_tScheduler;
};
CSphFixedVector<HelperFiber_t> m_dFibers; // helpers to serialize update/change tasks
std::atomic<DWORD> m_iFiber;
public:
explicit FiberPool_c (int iHelpers)
: m_dFibers { iHelpers }
, m_iFiber {0}
{}
Threads::RoledSchedulerSharedPtr_t GetFiber ()
{
auto iIdx = m_iFiber.fetch_add ( 1, std::memory_order_relaxed ) % m_dFibers.GetLength();
auto& tSched = m_dFibers[iIdx].m_tScheduler;
if ( !tSched )
{
if ( m_dFibers[iIdx].m_sName.IsEmpty () )
m_dFibers[iIdx].m_sName.SetSprintf ( "FP_%d", iIdx );
tSched = MakeAloneScheduler ( Coro::CurrentScheduler (), m_dFibers[iIdx].m_sName.cstr () );
}
return tSched;
}
};
*/
// main dataset of RT index. Keeps disk chunks and RAM segments
// * provides read-only snapshots for searches
// * provides serialized access for changing chunks and segments
// * provides fiber workers for undependent processing
// main dataset holder of the RT index: disk chunks and RAM segments.
// readers take cheap refcounted snapshots under a very short-term lock;
// writers swap in whole new vectors via RtWriter_c (friend)
class RtData_c
{
	mutable RwLock_t m_tLock; // very short-term
	ConstDiskChunkVecRefPtr_t m_pChunks GUARDED_BY ( m_tLock );
	ConstRtSegVecRefPtr_t m_pSegments GUARDED_BY ( m_tLock );

	friend class RtWriter_c;

public:
	RtData_c ()
	{
		// start with empty (but non-null) chunk and segment sets
		m_pChunks = new DiskChunkVec_c;
		m_pSegments = new RtSegVec_c;
	}

	~RtData_c () = default;

	// find a disk chunk by its persistent chunk ID (not by position)
	ConstDiskChunkRefPtr_t DiskChunkByID ( int iChunkID ) const
	{
		ScRL_t rLock ( m_tLock );
		for ( auto& pChunk : *m_pChunks )
			if ( pChunk->Cidx().m_iChunk == iChunkID )
				return pChunk;
		return ConstDiskChunkRefPtr_t (nullptr);
	}

	// find a disk chunk by its position in the current vector
	ConstDiskChunkRefPtr_t DiskChunkByIdx ( int iChunk ) const
	{
		ScRL_t rLock ( m_tLock );
		if ( iChunk < 0 || iChunk >= m_pChunks->GetLength() )
			return ConstDiskChunkRefPtr_t ( nullptr );
		return ( *m_pChunks )[iChunk];
	}

	// snapshot of the disk chunk set
	ConstDiskChunkVecRefPtr_t DiskChunks () const
	{
		ScRL_t rLock ( m_tLock );
		return m_pChunks;
	}

	// snapshot of the RAM segment set
	ConstRtSegVecRefPtr_t RamSegs () const
	{
		ScRL_t rLock ( m_tLock );
		return m_pSegments;
	}

	// consistent snapshot of both sets taken under one lock
	ConstRtData RtData () const
	{
		ScRL_t rLock ( m_tLock );
		return { m_pChunks, m_pSegments };
	}

	bool IsEmpty() const
	{
		ScRL_t rLock ( m_tLock );
		return m_pChunks->IsEmpty() && m_pSegments->IsEmpty();
	}

	int GetRamSegmentsCount() const
	{
		ScRL_t rLock ( m_tLock );
		return m_pSegments->GetLength();
	}

	int GetDiskChunksCount () const
	{
		ScRL_t rLock ( m_tLock );
		return m_pChunks->GetLength ();
	}
};
// helper for easier access to ConstRtData members
// note: that is pointer to CONST vector of CONST chunks everywhere, keep this constage from casts!
// helper for easier access to ConstRtData members
// note: that is pointer to CONST vector of CONST chunks everywhere, keep this constage from casts!
struct RtGuard_t
{
	ConstRtData					m_tSegmentsAndChunks;	// keeps the snapshot alive
	const DiskChunkVec_c &		m_dDiskChunks;			// convenience refs into it
	const RtSegVec_c &			m_dRamSegs;

	RtGuard_t ( RtGuard_t&& ) noexcept = default;
	explicit RtGuard_t ( ConstRtData tData )
		: m_tSegmentsAndChunks { std::move ( tData ) }
		, m_dDiskChunks { *m_tSegmentsAndChunks.m_pChunks }
		, m_dRamSegs { *m_tSegmentsAndChunks.m_pSegs }
	{}
};
// created with null set of ram segments and disk chunks
// on d-tr any not-null set will replace chunks and segments from the owner
// Note, if you want to modify existing set, you NEED to guard some way period between reading old / writing modified
class RtWriter_c
{
public:
	RtData_c& m_tOwner;						// dataset the new vectors get published to
	DiskChunkVecRefPtr_t m_pNewDiskChunks;	// replacement disk chunk vector (null = keep owner's)
	RtSegVecRefPtr_t m_pNewRamSegs;			// replacement RAM segment vector (null = keep owner's)
	Handler m_fnOnRamSegsChanged;			// invoked (after the lock is released) when RAM segments were replaced

	RtWriter_c ( RtWriter_c&& rhs ) noexcept = default;
	RtWriter_c ( RtData_c & tOwner, Handler&& fnOnRamSegsChanged )
		: m_tOwner ( tOwner )
		, m_fnOnRamSegsChanged { std::move ( fnOnRamSegsChanged ) }
	{}

	// publish whatever vectors were prepared; no-op if neither was initialized
	~RtWriter_c()
	{
		if ( !m_pNewDiskChunks && !m_pNewRamSegs )
			return;
		{
			ScWL_t wLock ( m_tOwner.m_tLock );

			// use leak since we convert 'data*' to 'const data*' here.
			if ( m_pNewDiskChunks )
				m_tOwner.m_pChunks = m_pNewDiskChunks.Leak();

			if ( !m_pNewRamSegs )
				return; // disk chunks only; no RAM-segments-changed notification needed
			m_tOwner.m_pSegments = m_pNewRamSegs.Leak();
		}
		m_fnOnRamSegsChanged(); // deliberately called outside the write lock
	}
	enum Copy_e { copy };
	enum Empty_e { empty };

	// start from empty replacement vectors
	void InitRamSegs ( Empty_e ) { m_pNewRamSegs = new RtSegVec_c (); }
	void InitDiskChunks ( Empty_e ) { m_pNewDiskChunks = new DiskChunkVec_c (); }

	// start from a copy of the owner's current vectors
	void InitRamSegs ( Copy_e ) EXCLUDES ( m_tOwner.m_tLock )
	{
		InitRamSegs ( empty );
		for ( const auto & pSeg : *m_tOwner.RamSegs() )
			m_pNewRamSegs->Add ( pSeg );
	}

	void InitDiskChunks ( Copy_e ) EXCLUDES ( m_tOwner.m_tLock )
	{
		InitDiskChunks ( empty );
		auto pChunks = m_tOwner.DiskChunks();
		for ( const auto & pChunk : *pChunks )
			m_pNewDiskChunks->Add ( pChunk );
	}

	// take the head disk chunk out of the set; the replacement vector gets the
	// remaining chunks. Returns an empty ref when the owner has no chunks.
	ConstDiskChunkRefPtr_t PopDiskChunk () EXCLUDES ( m_tOwner.m_tLock )
	{
		InitDiskChunks ( empty );
		auto pChunks = m_tOwner.DiskChunks();
		if ( !pChunks->GetLength() )
			return ConstDiskChunkRefPtr_t();
		auto pHeadChunk = pChunks->First();
		for ( int i=1; i<pChunks->GetLength(); i++ )
		{
			m_pNewDiskChunks->Add ( pChunks->At ( i ) );
		}
		return pHeadChunk;
	}
};
// issues sequential IDs for new disk chunks, and reports the highest ID in use
class ChunkID_c
{
	int m_iCh = -1;

public:
	// produce the next unused chunk ID. On the very first call, scan the existing
	// disk chunks so numbering continues right after the highest ID seen so far.
	int MakeChunkId ( const RtData_c& tData )
	{
		if ( m_iCh<0 )
		{
			for ( const auto& pChunk : *tData.DiskChunks() )
				m_iCh = Max ( m_iCh, pChunk->Cidx().m_iChunk );
		}
		return ++m_iCh;
	}

	// highest chunk ID currently present among the disk chunks (0 when there are none)
	int GetChunkId ( const RtData_c & tData ) const
	{
		int iMaxId = 0;
		tData.DiskChunks()->for_each ( [&iMaxId] ( const auto & pChunk ) { iMaxId = Max ( iMaxId, pChunk->Cidx().m_iChunk ); } );
		return iMaxId;
	}
};
// per-index worker schedulers:
// * a serializing ("alone") fiber used for all changes to the chunk/segment vectors
// * a saver scheduler used for disk manipulations
// plus an atomic ticket counter for numbering operations.
class WorkerSchedulers_c
{
	RoledSchedulerSharedPtr_t m_tSerialChunkAccess; // serialize changing chunks and segs vec
	RoledSchedulerSharedPtr_t m_tChunkSaver;		// scheduler for disk manipulations.
	std::atomic<int> m_iNextOp { 1 };				// next op ticket; 0 is reserved and never handed out

public:
	// lazily create both schedulers; subsequent calls are no-ops
	void InitWorkers()
	{
		if ( !m_tSerialChunkAccess )
		{
			m_tSerialChunkAccess = MakeAloneScheduler ( GlobalWorkPool(), "serial" );
#ifdef PERFETTO
			// set name for tracing
			auto tTrack = perfetto::Track::FromPointer ( &m_tSerialChunkAccess );
			auto tDesc = tTrack.Serialize();
			tDesc.set_name( SphSprintf ( "serial_%p", &m_tSerialChunkAccess ).cstr() );
			perfetto::TrackEvent::SetTrackDescriptor ( tTrack, tDesc );
#endif
		}

		if ( !m_tChunkSaver )
		{
			m_tChunkSaver = WrapRawScheduler ( GlobalWorkPool(), "saver" );
#ifdef PERFETTO
			// set name for tracing
			auto tTrack = perfetto::Track::FromPointer ( &m_tChunkSaver );
			auto tDesc = tTrack.Serialize();
			tDesc.set_name ( SphSprintf ( "saver_%p", &m_tChunkSaver ).cstr() );
			perfetto::TrackEvent::SetTrackDescriptor ( tTrack, tDesc );
#endif
		}
	}

#ifdef PERFETTO
	// drop the tracing track descriptors registered in InitWorkers()
	~WorkerSchedulers_c()
	{
		if ( m_tSerialChunkAccess )
			perfetto::TrackEvent::EraseTrackDescriptor ( perfetto::Track::FromPointer ( &m_tSerialChunkAccess ) );
		if ( m_tChunkSaver )
			perfetto::TrackEvent::EraseTrackDescriptor ( perfetto::Track::FromPointer ( &m_tChunkSaver ) );
	}
#endif

	Threads::SchedRole SerialChunkAccess() const RETURN_CAPABILITY ( m_tSerialChunkAccess )
	{
		return m_tSerialChunkAccess;
	}

	Threads::SchedRole SaveSegmentsWorker() const RETURN_CAPABILITY ( m_tChunkSaver )
	{
		return m_tChunkSaver;
	}

	// hand out the next non-zero operation ticket (wraps past zero if the counter overflows)
	inline int GetNextOpTicket()
	{
		auto iRes = m_iNextOp.fetch_add ( 1, std::memory_order_relaxed );
		if ( !iRes ) // zero tag has special meaning, skip it
			iRes = m_iNextOp.fetch_add ( 1, std::memory_order_relaxed );
		return iRes;
	}
};
// collect the IDs of all disk chunks that are not scheduled for final unlinking
CSphVector<int> GetChunkIds ( const VecTraits_T<DiskChunkRefPtr_t> & dChunks )
{
	CSphVector<int> dChunkIds;
	for ( const DiskChunkRefPtr_t & pChunk : dChunks )
		if ( !pChunk->m_bFinallyUnlink )
			dChunkIds.Add ( pChunk->Idx().m_iChunk );
	return dChunkIds;
}
// gate that controls whether the index may currently save/flush itself.
// Disabling is counted, so nested disable/enable pairs balance out; the
// shutdown flag is sticky and unblocks all waiters for good.
class SaveState_c
{
public:
	enum States_e : BYTE {
		ENABLED,	// normal, saving possible
		DISCARD,	// disabled, current result will not be necessary (can escape to don't waste resources)
		DISABLED,	// disabled, current stage must be completed first
	};

	// switch the state. ENABLED pops one level off the disable counter,
	// any disabled state pushes one. All waiters are notified on change.
	void SetState ( States_e eState )
	{
		if ( !Threads::IsInsideCoroutine() )
		{ // call from naked worker, typically indextool
			// no waiters possible here, so mutate the value directly without notification
			auto& t = const_cast<Value_t&> ( m_tValue.GetValueRef() );
			t.m_eValue = eState;
			return;
		}
		assert ( Threads::IsInsideCoroutine() );
		m_tValue.ModifyValueAndNotifyAll ( [eState] ( Value_t& t )
		{
			t.m_eValue = eState;
			if ( eState == States_e::ENABLED )
			{
				if ( t.m_iDisabledCounter > 0 )
					--t.m_iDisabledCounter;
			} else
				++t.m_iDisabledCounter;
		});
	}

	// mark the index as shutting down (sticky) and wake all waiters
	void SetShutdownFlag ()
	{
		m_tValue.ModifyValueAndNotifyAll ( [] ( Value_t& t ) { t.m_bShutdown = true; } );
	}

	// check current state; ENABLED is judged by the nesting counter, not by the last set value
	bool ActiveStateIs ( States_e eValue ) const
	{
		if ( !Threads::IsInsideCoroutine() ) // call from naked worker, typically indextool
			return m_tValue.GetValueRef().m_eValue == eValue;
		if ( eValue == States_e::ENABLED )
			return m_tValue.GetValueRef().m_iDisabledCounter == 0;
		return m_tValue.GetValueRef().m_eValue == eValue;
	}

	// sleep and return true when state is enabled.
	// sleep and return false if index's shutdown happened.
	bool WaitEnabledOrShutdown () const noexcept
	{
		while (true) {
			auto tVal = m_tValue.WaitForMs ( [&] ( const Value_t& tVal ) {
				if ( tVal.m_bShutdown )
					return true;
				if ( tVal.m_eValue!=States_e::ENABLED )
					return false;
				return tVal.m_iDisabledCounter==0;
			}, 10000 ); // time doesn't matter, as shutdown abandons all timers
			if ( tVal.m_bShutdown || sphInterrupted() || tVal.m_iDisabledCounter==0 )
				return !tVal.m_bShutdown;
		}
	}

	// current disable-nesting depth (0 = saving allowed)
	int GetNumOfLocks() const noexcept
	{
		return m_tValue.GetValueRef().m_iDisabledCounter;
	}

private:
	struct Value_t
	{
		States_e m_eValue = SaveState_c::ENABLED;	// last explicitly set state
		int m_iDisabledCounter = 0;					// nesting depth of disable requests
		bool m_bShutdown = false;					// sticky; set once on index shutdown
	};

	Coro::Waitable_T<Value_t> m_tValue;
};
// reason the RAM segment merger worker was (or should be) woken up.
// NOTE(review): values are distinct bits (1,2,4), which suggests they may be
// combined at use sites — confirm against the merger worker code.
enum class MergeSeg_e : BYTE
{
	NONE	= 0,	// idle
	KILLED	= 1,	// kill happened
	NEWSEG	= 2,	// insertion happened
	EXIT	= 4,	// shutdown and exit
};
// the RT (real-time) index: a set of disk chunks plus in-RAM segments,
// managed through the serial fiber of m_tWorkers (see RtData_c / WorkerSchedulers_c above)
class RtIndex_c final : public RtIndex_i, public ISphNoncopyable, public ISphWordlist, public ISphWordlistSuggest, public IndexAlterHelper_c, public DebugCheckHelper_c
{
public:
	RtIndex_c ( CSphString sIndexName, CSphString sPath, CSphSchema tSchema, int64_t iRamSize, bool bKeywordDict );
	~RtIndex_c () final;

	// indexing / transaction API
	bool AddDocument ( InsertDocData_c & tDoc, bool bReplace, const CSphString & sTokenFilterOptions, CSphString & sError, CSphString & sWarning, RtAccum_t * pAccExt ) override;
	virtual bool AddDocument ( ISphHits * pHits, const InsertDocData_c & tDoc, bool bReplace, const DocstoreBuilder_i::Doc_t * pStoredDoc, CSphString & sError, CSphString & sWarning, RtAccum_t * pAccExt );
	bool DeleteDocument ( const VecTraits_T<DocID_t> & dDocs, CSphString & sError, RtAccum_t * pAccExt ) final;
	bool Commit ( int * pDeleted, RtAccum_t * pAccExt, CSphString* pError = nullptr ) final;
	void RollBack ( RtAccum_t * pAccExt ) final;
	bool CommitReplayable ( RtSegment_t * pNewSeg, const VecTraits_T<DocID_t> & dAccKlist, int64_t iAddTotalBytes, int & iTotalKilled, CSphString & sError );
	void ForceRamFlush ( const char * szReason ) final;
	bool IsFlushNeed() const final;
	bool ForceDiskChunk() final;
	bool AttachDiskIndex ( CSphIndex * pIndex, bool bTruncate, bool & bFatal, CSphString & sError ) final;
	bool AttachRtIndex ( RtIndex_i * pIndex, bool bTruncate, bool & bFatal, CSphString & sError ) final;
	bool Truncate ( CSphString & sError, Truncate_e eAction ) final;

	// OPTIMIZE machinery: merging, compressing, splitting and dropping disk chunks
	bool CheckValidateOptimizeParams ( OptimizeTask_t& tTask ) const;
	bool CheckValidateChunk ( int& iChunk, int iChunks, bool bByOrder ) const;
	bool StartOptimize ( OptimizeTask_t tTask ) final;
	int OptimizesRunning() const noexcept final;
	int GetNumOfLocks() const noexcept final;
	void Optimize ( OptimizeTask_t tTask ) final;
	void CheckStartAutoOptimize ();
	int ClassicOptimize ();
	int ProgressiveOptimize ( int iCutoff );
	int CommonOptimize ( OptimizeTask_t tTask );
	void DropDiskChunk ( int iChunk, int* pAffected=nullptr );
	bool CompressOneChunk ( int iChunk, int* pAffected = nullptr );
	bool DedupOneChunk ( int iChunk, int* pAffected = nullptr );
	bool MergeTwoChunks ( int iA, int iB, int* pAffected, CSphString* sLog = nullptr );
	bool MergeCanRun () const;
	bool SplitOneChunk ( int iChunkID, const char* szUvarFilter, int* pAffected = nullptr );
	bool SplitOneChunkFast ( int iChunkID, const char * szUvarFilter, bool& bResult, int* pAffected = nullptr );
	int ChunkIDByChunkIdx (int iChunkIdx) const;

	// stats used by the query planner / pseudo-sharding
	int64_t GetCountDistinct ( const CSphString & sAttr, CSphString & sModifiedAttr ) const override;
	int64_t GetCountFilter ( const CSphFilterSettings & tFilter, CSphString & sModifiedAttr ) const override;
	int64_t GetCount() const override;
	std::pair<int64_t,int> GetPseudoShardingMetric ( const VecTraits_T<const CSphQuery> & dQueries, const VecTraits_T<int64_t> & dMaxCountDistinct, int iThreads, bool & bForceSingleThread ) const override;

	// helpers
	ConstDiskChunkRefPtr_t MergeDiskChunks ( const char* szParentAction, const ConstDiskChunkRefPtr_t& pChunkA, const ConstDiskChunkRefPtr_t& pChunkB, CSphIndexProgress& tProgress, VecTraits_T<CSphFilterSettings> dFilters );
	bool PublishMergedChunks ( const char * szParentAction,std::function<bool ( int, DiskChunkVec_c & )> && fnPusher) REQUIRES ( m_tWorkers.SerialChunkAccess() );
	bool RenameOptimizedChunk ( const ConstDiskChunkRefPtr_t& pChunk, const char * szParentAction );
	bool SkipOrDrop ( int iChunk, const CSphIndex& dChunk, bool bCheckAlive, int* pAffected = nullptr );
	void ProcessDiskChunk ( int iChunk, VisitChunk_fn&& fnVisitor ) const final;
	template <typename VISITOR>
	void ProcessDiskChunkByID ( int iChunkID, VISITOR&& fnVisitor ) const;
	template <typename VISITOR>
	void ProcessDiskChunkByID ( VecTraits_T<int> dChunkIDs, VISITOR&& fnVisitor ) const;
	TokenizerRefPtr_c CloneIndexingTokenizer() const final { return m_pTokenizerIndexing->Clone ( SPH_CLONE_INDEX ); }
	void SetKillHookFor ( IndexSegment_c* pAccum, int iDiskChunkID ) const;
	void SetKillHookFor ( IndexSegment_c* pAccum, VecTraits_T<int> dDiskChunkIDs ) const;

	// binlog replay callbacks
	Binlog::CheckTnxResult_t ReplayTxn ( CSphReader& tReader, CSphString & sError, BYTE uOp, Binlog::CheckTxn_fn&& fnCanContinue ) override; // cb from binlog
	Binlog::CheckTnxResult_t ReplayCommit ( CSphReader & tReader, CSphString & sError, Binlog::CheckTxn_fn && fnCanContinue );

public:
#if _WIN32
#pragma warning(push,1)
#pragma warning(disable:4100)
#endif
	int Kill ( DocID_t tDocID ) final;
	int KillMulti ( const VecTraits_T<DocID_t> & dKlist ) final;
	bool IsAlive ( DocID_t tDocID ) const final;
	bool Prealloc ( bool bStripPath, FilenameBuilder_i * pFilenameBuilder, StrVec_t & dWarnings ) final;
	void Preread () final;
	void PostSetup() final;
	bool IsRT() const final { return true; }
	int CheckThenUpdateAttributes ( AttrUpdateInc_t & tUpd, bool & bCritical, CSphString & sError, CSphString & sWarning ) final;
	bool SaveAttributes ( CSphString & sError ) const final;
	DWORD GetAttributeStatus () const final { return m_uDiskAttrStatus; }
	bool AddRemoveAttribute ( bool bAdd, const AttrAddRemoveCtx_t & tCtx, CSphString & sError ) final;
	bool AddRemoveField ( bool bAdd, const CSphString & sFieldName, DWORD uFieldFlags, CSphString & sError ) final;
	int DebugCheck ( DebugCheckError_i & , FilenameBuilder_i * pFilenameBuilder ) final;
#if _WIN32
#pragma warning(pop)
#endif
	bool EarlyReject ( CSphQueryContext * pCtx, CSphMatch & ) const final;
	const CSphSourceStats & GetStats () const final { return m_tStats; }
	int64_t * GetFieldLens() const final { return m_tSettings.m_bIndexFieldLens ? m_dFieldLens.Begin() : nullptr; }
	void GetStatus ( CSphIndexStatus* ) const final;

	// searching & keyword introspection
	bool MultiQuery ( CSphQueryResult& tResult, const CSphQuery& tQuery, const VecTraits_T<ISphMatchSorter*>& dAllSorters, const CSphMultiQueryArgs& tArgs ) const final;
	bool DoGetKeywords ( CSphVector<CSphKeywordInfo>& dKeywords, const char* sQuery, const GetKeywordsSettings_t& tSettings, bool bFillOnly, CSphString* pError, const RtGuard_t& tGuard ) const;
	bool GetKeywords ( CSphVector<CSphKeywordInfo>& dKeywords, const char* sQuery, const GetKeywordsSettings_t& tSettings, CSphString* pError ) const final;
	bool FillKeywords ( CSphVector <CSphKeywordInfo> & dKeywords ) const final;
	void AddKeywordStats ( BYTE* sWord, const BYTE* sTokenized, const DictRefPtr_c& pDict, bool bGetStats, int iQpos, RtQword_t* pQueryWord, CSphVector<CSphKeywordInfo>& dKeywords, const RtSegVec_c& dRamSegs ) const;
	bool RtQwordSetup ( RtQword_t * pQword, int iSeg, const RtGuard_t& tGuard ) const;
	bool RtQwordSetupSegment ( RtQword_t* pQword, const RtSegment_t* pCurSeg, bool bSetup ) const;
	bool IsWordDict () const { return m_bKeywordDict; }
	int GetWordCheckoint() const { return m_iWordsCheckpoint; }
	int GetMaxCodepointLength() const { return m_iMaxCodepointLength; }
	bool IsSameSettings ( CSphReconfigureSettings & tSettings, CSphReconfigureSetup & tSetup, StrVec_t & dWarnings, CSphString & sError ) const final;
	bool Reconfigure ( CSphReconfigureSetup & tSetup ) final;
	int64_t GetLastFlushTimestamp() const final;
	void IndexDeleted() final { m_bIndexDeleted = true; }
	void ProhibitSave() final;
	void EnableSave() final;
	void LockFileState ( CSphVector<CSphString> & dFiles ) final;
	void SetDebugCheck ( bool bCheckIdDups, int iCheckChunk ) final;
	void CreateReader ( int64_t iSessionId ) const final;
	bool GetDoc ( DocstoreDoc_t & tDoc, DocID_t tDocID, const VecTraits_T<int> * pFieldIds, int64_t iSessionId, bool bPack ) const final;
	int GetFieldId ( const CSphString & sName, DocstoreDataType_e eType ) const final;
	Bson_t ExplainQuery ( const CSphString & sQuery ) const final;
	uint64_t GetSchemaHash () const final { return m_uSchemaHash; }

protected:
	CSphSourceStats m_tStats;
	bool m_bDebugCheck = false;
	bool m_bCheckIdDups = false;
	int m_iCheckChunk = -1;			// limit debug check to this chunk (-1 = all)
	CSphFixedVector<int> m_dChunkNames { 0 };

private:
	static const DWORD META_HEADER_MAGIC = 0x54525053;	///< my magic 'SPRT' header

	// NOTICE! meta version 21 was introduced in 2a6ea8f7 and rolled back to 20 in e1709760.
	// if you need to upgrade - skip v21 and use v22.
	static constexpr DWORD META_VERSION = 20; // next should be 22
	//< current version. since 20 we now store meta in json fixme! Also change version in indextool.cpp, and support the changes!

	int m_iStride;
	uint64_t m_uSchemaHash = 0;
	std::atomic<int64_t> m_iRamChunksAllocatedRAM { 0 };
	std::atomic<bool> m_bOptimizeStop { false };
	Coro::Waitable_T<int> m_tOptimizeRuns {0};
	friend class OptimizeGuard_c;
	int64_t m_iRtMemLimit;
	int64_t m_iSoftRamLimit;
	double m_fSaveRateLimit { INITIAL_SAVE_RATE_LIMIT };
	bool m_bPathStripped = false;
	int m_iLockFD = -1;
	ChunkID_c m_tChunkID;
	RtData_c m_tRtChunks; // that is main set of disk chunks and RAM segments
	WorkerSchedulers_c m_tWorkers;
	Coro::Waitable_T<int> m_tUnLockedSegments { 0 }; // how many segments are not participating in any locked ops (like merge, save to disk).
	Coro::Waitable_T<MergeSeg_e> m_eSegMergeQueued { MergeSeg_e::NEWSEG };
	Coro::Waitable_T<bool> m_bSegMergeWorking { false };
	Coro::Waitable_T<CSphVector<int64_t>> m_tSaveTIDS { 0 }; // save operations performing now, and their TIDs
	int m_iSaveGeneration = 0; // SaveDiskChunk() increases generation on finish
	Coro::Waitable_T<int> m_tNSavesNow { 0 }; // N of merge segment routines running right now
	bool m_bIndexDeleted = false;

	int64_t m_iSavedTID = 0;	// last TID persisted to disk
	int64_t m_tmSaved;			// microsecond timestamp of the last save
	mutable DWORD m_uDiskAttrStatus = 0;

	bool m_bKeywordDict;
	int m_iWordsCheckpoint = RTDICT_CHECKPOINT_V5;
	int m_iMaxCodepointLength = 0;
	TokenizerRefPtr_c m_pTokenizerIndexing;
	bool m_bPreallocPassedOk = true;
	SaveState_c m_tSaving;
	bool m_bHasFiles = false;

	// fixme! make this *Lens atomic together with disk/ram data, to avoid any kind of race among them
	CSphFixedVector<int64_t> m_dFieldLens { SPH_MAX_FIELDS };		///< total field lengths over entire index
	CSphFixedVector<int64_t> m_dFieldLensRam { SPH_MAX_FIELDS };	///< field lengths summed over current RAM chunk
	CSphFixedVector<int64_t> m_dFieldLensDisk { SPH_MAX_FIELDS };	///< field lengths summed over all disk chunks
	CSphVector<SphWordID_t> m_dHitlessWords;

	std::unique_ptr<DocstoreFields_i> m_pDocstoreFields;	// rt index doesn't have its own docstore, but it must keep all fields to get their ids for GetDoc

	mutable int m_iTrackFailedRamActions;	// remaining number of insert/merge errors to report (env MANTICORE_TRACK_RT_ERRORS)
	int m_iAlterGeneration = 0;				// increased every time index altered

	bool BindAccum ( RtAccum_t * pAccExt, CSphString* pError = nullptr ) final;

	// RAM segment merging
	int CompareWords ( const RtWord_t * pWord1, const RtWord_t * pWord2 ) const;
	CSphFixedVector<RowID_t> CopyAttributesFromAliveDocs ( RtSegment_t& tDstSeg, const RtSegment_t & tSrcSeg, RtAttrMergeContext_t & tCtx ) const REQUIRES ( tDstSeg.m_tLock ) REQUIRES (m_tWorkers.SerialChunkAccess());
	void MergeKeywords ( RtSegment_t & tSeg, const RtSegment_t & tSeg1, const RtSegment_t & tSeg2, const VecTraits_T<RowID_t> & dRowMap1, const VecTraits_T<RowID_t> & dRowMap2 ) const;
	RtSegment_t * MergeTwoSegments ( const RtSegment_t * pA, const RtSegment_t * pB ) const REQUIRES (m_tWorkers.SerialChunkAccess());
	static void CopyWord ( RtSegment_t& tDstSeg, RtWord_t& tDstWord, RtDocWriter_c& tDstDoc, const RtSegment_t& tSrcSeg, const RtWord_t* pSrcWord, const VecTraits_T<RowID_t>& dRowMap );

	// ALTER support on the RAM chunk
	void DeleteFieldFromDict ( RtSegment_t * pSeg, int iKillField );
	void AddFieldToRamchunk ( const CSphString & sFieldName, DWORD uFieldFlags, const CSphSchema & tOldSchema, const CSphSchema & tNewSchema );
	void RemoveFieldFromRamchunk ( const CSphString & sFieldName, const CSphSchema & tOldSchema, const CSphSchema & tNewSchema );
	void AddRemoveFromRamDocstore ( const CSphSchema & tOldSchema, const CSphSchema & tNewSchema );

	// meta / ram chunk / disk chunk persistence
	bool LoadMeta ( FilenameBuilder_i * pFilenameBuilder, bool bStripPath, DWORD & uVersion, bool & bRebuildInfixes, StrVec_t & dWarnings );
	bool LoadMetaImpl ( FilenameBuilder_i * pFilenameBuilder, bool bStripPath, DWORD & uVersion, bool & bRebuildInfixes, StrVec_t & dWarnings );
	enum class LOAD_E { ParseError_e, GeneralError_e, Ok_e };
	LOAD_E LoadMetaJson ( FilenameBuilder_i * pFilenameBuilder, bool bStripPath, DWORD & uVersion, bool & bRebuildInfixes, StrVec_t & dWarnings );
	LOAD_E LoadMetaLegacy ( FilenameBuilder_i * pFilenameBuilder, bool bStripPath, DWORD & uVersion, bool & bRebuildInfixes, StrVec_t & dWarnings );
	bool PreallocDiskChunks ( FilenameBuilder_i * pFilenameBuilder, StrVec_t & dWarnings );
	void SaveMeta ( int64_t iTID, VecTraits_T<int> dChunkNames );
	void SaveMeta ();
	bool SaveDiskHeader ( SaveDiskDataContext_t & tCtx, const ChunkStats_t & tStats, CSphString & sError ) const;
	bool SaveDiskData ( const char * szFilename, const ConstRtSegmentSlice_t & tSegs, const ChunkStats_t & tStats, CSphString & sError ) const;
	bool SaveDiskChunk ( bool bForced, bool bEmergent=false, bool bBootstrap=false ) REQUIRES ( m_tWorkers.SerialChunkAccess() );
	std::unique_ptr<CSphIndex> PreallocDiskChunk ( const CSphString& sChunk, int iChunk, FilenameBuilder_i * pFilenameBuilder, StrVec_t & dWarnings, CSphString & sError, const char * szName=nullptr ) const;
	bool LoadRamChunk ( DWORD uVersion, bool bRebuildInfixes, bool bFixup = true );
	bool SaveRamChunk ();
	bool WriteAttributes ( SaveDiskDataContext_t & tCtx, CSphString & sError ) const;
	bool WriteDocs ( SaveDiskDataContext_t & tCtx, CSphWriter & tWriterDict, CSphString & sError ) const;
	void WriteCheckpoints ( SaveDiskDataContext_t & tCtx, CSphWriter & tWriterDict ) const;
	static bool WriteDeadRowMap ( SaveDiskDataContext_t & tCtx, CSphString & sError );

	// wordlist expansion / suggestions (ISphWordlist, ISphWordlistSuggest)
	void GetPrefixedWords ( const char * sSubstring, int iSubLen, const char * sWildcard, Args_t & tArgs ) const final;
	void GetInfixedWords ( const char * sSubstring, int iSubLen, const char * sWildcard, Args_t & tArgs ) const final;
	void ScanRegexWords ( const VecTraits_T<RegexTerm_t> & dTerms, const ISphWordlist::Args_t & tArgs, const VecExpandConv_t & dConverters ) const final;
	void GetSuggest ( const SuggestArgs_t & tArgs, SuggestResult_t & tRes ) const final;
	void SuffixGetChekpoints ( const SuggestResult_t & tRes, const char * sSuffix, int iLen, CSphVector<DWORD> & dCheckpoints ) const final;
	void SetCheckpoint ( SuggestResult_t & tRes, DWORD iCP ) const final;
	bool ReadNextWord ( SuggestResult_t & tRes, DictWord_t & tWord ) const final;

	ConstRtSegmentRefPtf_t AdoptSegment ( RtSegment_t * pNewSeg );

	int ApplyKillList ( const VecTraits_T<DocID_t> & dAccKlist ) REQUIRES ( m_tWorkers.SerialChunkAccess() );

	bool AddRemoveColumnarAttr ( RtGuard_t & tGuard, bool bAdd, const CSphString & sAttrName, ESphAttr eAttrType, const CSphSchema & tOldSchema, const CSphSchema & tNewSchema, CSphString & sError );
	void AddRemoveRowwiseAttr ( RtGuard_t & tGuard, bool bAdd, const CSphString & sAttrName, ESphAttr eAttrType, const CSphSchema & tOldSchema, const CSphSchema & tNewSchema, CSphString & sError );
	bool Update_DiskChunks ( AttrUpdateInc_t& tUpd, const DiskChunkSlice_t& dDiskChunks, CSphString& sError ) REQUIRES ( m_tWorkers.SerialChunkAccess() );

	void GetIndexFiles ( StrVec_t& dFiles, StrVec_t& dExt, const FilenameBuilder_i* = nullptr ) const override;
	DocstoreBuilder_i::Doc_t * FetchDocFields ( DocstoreBuilder_i::Doc_t & tStoredDoc, const InsertDocData_c & tDoc, CSphSource_StringVector & tSrc, CSphVector<CSphVector<BYTE>> & dTmpAttrStorage ) const;

	void UnlinkRAMChunk ( const char * szInfo=nullptr );
	void WaitRAMSegmentsUnlocked ( bool bAllowOne = false ) const REQUIRES ( m_tWorkers.SerialChunkAccess() );
	bool MergeSegmentsStep( MergeSeg_e eVal ) REQUIRES ( m_tWorkers.SerialChunkAccess() );
	void RunMergeSegmentsWorker();
	void StartMergeSegments ( MergeSeg_e eMergeWhat, bool bNotify=true ) REQUIRES ( m_tWorkers.SerialChunkAccess() );
	void StopMergeSegmentsWorker() REQUIRES ( m_tWorkers.SerialChunkAccess() );
	bool NeedStoreWordID () const override;
	int64_t GetMemLimit() const final { return m_iRtMemLimit; }

	bool VerifyKNN ( InsertDocData_c & tDoc, CSphString & sError ) const;

	template<typename PRED>
	int64_t GetMemCount(PRED&& fnPred) const;

	void DebugCheckRam ( DebugCheckError_i & tReporter );
	int DebugCheckDisk ( DebugCheckError_i & tReporter );

	void SetSchema ( CSphSchema tSchema );
	void SetMemLimit ( int64_t iMemLimit );
	void RecalculateRateLimit ( int64_t iSaved, int64_t iInserted, bool bEmergent );
	void AlterSave ( bool bSaveRam );
	bool BinlogCommit ( RtSegment_t * pSeg, const VecTraits_T<DocID_t> & dKlist, int64_t iAddTotalBytes, CSphString & sError );
	bool StopOptimize();
	void UpdateUnlockedCount();
	bool CheckSegmentConsistency ( const RtSegment_t* pNewSeg, bool bSilent=true ) const;

	// internal helpers/hooks
	inline RtWriter_c RtWriter() { return { m_tRtChunks, [this] { UpdateUnlockedCount(); } }; }

	// set of my rt; suitable for any usage
	inline RtGuard_t RtGuard() const { return RtGuard_t { RtData() }; }

	// my own, or external data, if any present
	inline ConstRtData RtData() const { return m_tRtChunks.RtData(); }

	// debug dumps / damage reporting
	void DebugCheckRamSegment ( const RtSegment_t & tSegment, int iSegment, DebugCheckError_i & tReporter ) const;
	void SaveRamFieldLengths ( CSphWriter& wrChunk ) const;
	void SaveRamSegment ( const RtSegment_t* pSeg, CSphWriter& wrChunk ) const REQUIRES_SHARED ( pSeg->m_tLock );
	void WriteMeta ( int64_t iTID, const VecTraits_T<int>& dChunkNames, CSphWriter& wrMeta ) const;
	CSphString MakeDamagedName () const;
	void DumpSegments ( VecTraits_T<const RtSegment_t*> dSegments, const CSphString& sFile ) const;
	void DumpSegment ( const RtSegment_t* pSeg, const CSphString& sFile ) const;
	void DumpMeta ( const CSphString& sFile ) const;
	void DumpInsert ( const RtSegment_t* pNewSeg ) const;
	void DumpMerge ( const RtSegment_t* pA, const RtSegment_t* pB, const RtSegment_t* pNew ) const;

	// Manage alter state
	void RaiseAlterGeneration();
	int GetAlterGeneration() const override;
	bool AlterSI ( CSphString & sError ) override;

	// ATTACH support
	bool CanAttach ( const CSphIndex * pIndex, CSphString & sError ) const;
	bool AttachDiskChunkMove ( CSphIndex * pIndex, bool & bFatal, CSphString & sError ) REQUIRES ( m_tWorkers.SerialChunkAccess() );
	void AttachSetSettings ( CSphIndex * pIndex );
	bool AttachSaveDiskChunk ();
	ConstDiskChunkRefPtr_t PopDiskChunk();
	int GetChunkId () const override { return m_tChunkID.GetChunkId ( m_tRtChunks ); }
	void SetGlobalIDFPath ( const CSphString & sPath ) override;
	void DebugDumpDict ( FILE * fp, bool bDumpOnly ) final;
};
// construct an RT index shell: schema and RAM limit are set here,
// actual data gets loaded later via Prealloc()/Preread()
RtIndex_c::RtIndex_c ( CSphString sIndexName, CSphString sPath, CSphSchema tSchema, int64_t iRamSize, bool bKeywordDict )
	: RtIndex_i { std::move ( sIndexName ), std::move ( sPath ) }
	, m_tmSaved ( sphMicroTimer() )
	, m_bKeywordDict ( bKeywordDict )
	, m_iTrackFailedRamActions { 0 }
{
	MEMORY ( MEM_INDEX_RT );
	SetSchema ( std::move ( tSchema ) );
	SetMemLimit ( iRamSize );

	// opt-in diagnostics: the env var caps how many insert/merge failures get reported
	auto iTrackErrors = val_from_env ( "MANTICORE_TRACK_RT_ERRORS", -1 );
	if ( iTrackErrors>0 )
	{
		m_iTrackFailedRamActions = iTrackErrors;
		sphInfo ( "MANTICORE_TRACK_RT_ERRORS env provided; up to %d insert/merge errors will be reported", m_iTrackFailedRamActions );
	}
}
// shut down the index: stop background workers, perform the final RAM chunk +
// meta save, notify the binlog, and remove metadata files if the index was DROPped.
RtIndex_c::~RtIndex_c ()
{
	if ( IsInsideCoroutine())
	{
		// From serial worker resuming on Wait() will happen after whole merger coroutine finished.
		ScopedScheduler_c tSerialFiber { m_tWorkers.SerialChunkAccess() };
		TRACE_SCHED ( "rt", "~RtIndex_c" );
		m_tSaving.SetShutdownFlag ();
		if ( Threads::IsInsideCoroutine() )
		{
			Threads::Coro::Reschedule();
			StopMergeSegmentsWorker();
		}
		// wait until no save routines are running (see m_tNSavesNow)
		m_tNSavesNow.Wait ( [] ( int iVal ) { return iVal==0; } );
	}

	int64_t tmSave = sphMicroTimer();

	// final flush is attempted only if the index was fully set up
	bool bValid = m_pTokenizer && m_pDict && m_bPreallocPassedOk;
	if ( bValid )
		bValid &= SaveRamChunk();
	if ( bValid )
		SaveMeta();
	if ( m_iLockFD>=0 )
		::close ( m_iLockFD );
	if ( bValid )
		Binlog::NotifyIndexFlush ( m_iTID, GetName(), (Binlog::Shutdown_e)sphInterrupted(), (Binlog::ForceSave_e)m_bIndexDeleted );

	// a deleted index removes its metadata files on the way out
	if ( m_bIndexDeleted )
	{
		CSphString sFile = GetFilename ( "meta" );
		::unlink ( sFile.cstr() );
		sFile = GetFilename ( "ram" );
		::unlink ( sFile.cstr() );
		sFile = GetFilename ( SPH_EXT_SETTINGS );
		::unlink ( sFile.cstr() );
	}

	if ( !bValid )
		return;

	tmSave = sphMicroTimer() - tmSave;
	if ( tmSave>=1000 ) // only log saves that took a noticeable amount of time
		sphInfo ( "rt: table %s: ramchunk saved in %d.%03d sec", GetName(), (int)(tmSave/1000000), (int)((tmSave/1000)%1000) );
	if ( !sphInterrupted() )
		sphLogDebug ( "closed table %s, valid %d, deleted %d, time %d.%03d sec", GetName(), (int)bValid, (int)m_bIndexDeleted, (int)(tmSave/1000000), (int)((tmSave/1000)%1000) );
}
// bump the alter generation counter (increased every time the index is altered)
void RtIndex_c::RaiseAlterGeneration()
{
	m_iAlterGeneration += 1;
}
// current alter generation (see RaiseAlterGeneration)
int RtIndex_c::GetAlterGeneration() const
{
	return m_iAlterGeneration;
}
// recount RAM segments that are not locked by any ongoing operation
// (merge, save-to-disk) and publish the number to waiters
void RtIndex_c::UpdateUnlockedCount()
{
	if ( m_bDebugCheck )
		return;

	auto pSegs = m_tRtChunks.RamSegs();
	int iUnlocked = (int)pSegs->count_of ( [] ( auto& pSeg ) { return !pSeg->m_iLocked; } );
	m_tUnLockedSegments.UpdateValueAndNotifyAll ( iUnlocked );
}
// run the visitor over the disk chunk at the given position;
// out-of-range positions yield a nullptr visit
void RtIndex_c::ProcessDiskChunk ( int iChunk, VisitChunk_fn&& fnVisitor ) const
{
	auto pChunks = m_tRtChunks.DiskChunks();
	if ( iChunk>=0 && iChunk<pChunks->GetLength() )
		fnVisitor ( &( *pChunks )[iChunk]->Cidx() );
	else
		fnVisitor ( nullptr );
}
template<typename VISITOR>
void RtIndex_c::ProcessDiskChunkByID ( int iChunkID, VISITOR&& fnVisitor ) const
{
auto pChunks = m_tRtChunks.DiskChunks();
for ( const auto& pChunk : *pChunks )
if ( iChunkID == pChunk->Cidx().m_iChunk )
{
fnVisitor ( pChunk );
break;
}
}
template<typename VISITOR>
void RtIndex_c::ProcessDiskChunkByID ( VecTraits_T<int> dChunkIDs, VISITOR&& fnVisitor ) const
{
auto pChunks = m_tRtChunks.DiskChunks();
for ( int iDiskChunkID : dChunkIDs )
for ( const auto& pChunk : *pChunks )
if ( iDiskChunkID == pChunk->Cidx().m_iChunk )
{
fnVisitor ( pChunk );
break;
}
}
// a flush is needed when there are unsaved transactions and saving is currently enabled
bool RtIndex_c::IsFlushNeed() const
{
	// m_iTID get managed by binlog that is why wo binlog there is no need to compare it
	const bool bNothingNew = Binlog::IsActive() && m_iTID>=0 && m_iTID<=m_iSavedTID;
	return !bNothingNew && m_tSaving.ActiveStateIs ( SaveState_c::ENABLED );
}
static int64_t SegmentsGetUsedRam ( const ConstRtSegmentSlice_t& dSegments )
{
int64_t iTotal = 0;
for ( const RtSegment_t * pSeg : dSegments )
iTotal += pSeg->GetUsedRam();
return iTotal;
}
static int64_t SegmentsGetDeadRows ( const VecTraits_T<RtSegmentRefPtf_t> & dSegments )
{
int64_t iTotal = 0;
for ( RtSegment_t * pSeg : dSegments )
iTotal += pSeg->m_tDeadRowMap.GetNumDeads();
return iTotal;
}
// save RAM chunk to .ram, flush disk chunks dead-row-maps.
// szReason is only used for logging (e.g. which caller triggered the flush).
void RtIndex_c::ForceRamFlush ( const char* szReason )
{
	if ( !IsFlushNeed() )
		return;

	int64_t tmSave = sphMicroTimer();

	// hop onto the serial fiber: chunk/segment vectors must not change underneath us
	ScopedScheduler_c tSerialFiber ( m_tWorkers.SerialChunkAccess() );
	TRACE_SCHED ( "rt", "ForceRamFlush" );

	int64_t iUsedRam = SegmentsGetUsedRam ( *m_tRtChunks.RamSegs() );
	if ( !SaveRamChunk () )
	{
		sphWarning ( "rt: table %s: ramchunk save FAILED! (error=%s)", GetName(), m_sLastError.cstr() );
		return;
	}
	SaveMeta();

	// flush the dead-row map of every disk chunk as well
	auto pChunks = m_tRtChunks.DiskChunks();
	pChunks->for_each ( [] ( ConstDiskChunkRefPtr_t & pIdx ) { pIdx->Cidx().FlushDeadRowMap ( true ); } );
	Binlog::NotifyIndexFlush ( m_iTID, GetName(), Binlog::NoShutdown, Binlog::NoSave );

	// record save bookkeeping: last saved TID and last save timestamp
	int64_t iWasTID = std::exchange ( m_iSavedTID, m_iTID );
	auto tmNow = sphMicroTimer ();
	int64_t tmDelta = tmNow-std::exchange ( m_tmSaved, tmNow );
	tmSave = tmNow-tmSave;

	sphInfo ( "rt: table %s: ramchunk saved ok (mode=%s, last TID=" INT64_FMT ", current TID=" INT64_FMT ", "
		"ram=%d.%03d Mb, time delta=%d sec, took=%d.%03d sec)",
		GetName(), szReason, iWasTID, m_iTID, (int)(iUsedRam/1024/1024), (int)((iUsedRam/1024)%1000)
		, (int) (tmDelta/1000000), (int)(tmSave/1000000), (int)((tmSave/1000)%1000) );
}
// microsecond timestamp of the last successful RAM chunk save
// (initialized at construction, updated by ForceRamFlush)
int64_t RtIndex_c::GetLastFlushTimestamp() const
{
	return m_tmSaved;
}
//////////////////////////////////////////////////////////////////////////
// INDEXING
//////////////////////////////////////////////////////////////////////////
// wrap a vector of in-memory field blobs as an indexing source;
// field data is borrowed from the caller, not copied
CSphSource_StringVector::CSphSource_StringVector ( const VecTraits_T<VecTraits_T<const char >> &dFields, const CSphSchema & tSchema )
	: CSphSource ( "$blobvector" )
{
	m_tSchema = tSchema;

	int iFields = dFields.GetLength();
	m_dFieldLengths.Reserve ( iFields );
	m_dFields.Reserve ( iFields + 1 );

	for ( const auto & tField : dFields )
	{
		assert ( tField.begin() || tField.IsEmpty() );
		m_dFields.Add ( (BYTE*)const_cast<char*> ( tField.begin() ) );
		m_dFieldLengths.Add ( tField.GetLength() );
	}

	m_dFields.Add ( nullptr ); // trailing sentinel entry
	m_iMaxHits = 0; // force all hits build
}
// prepare for hit collection; always succeeds for this in-memory source
bool CSphSource_StringVector::Connect ( CSphString & )
{
	// no AddAutoAttrs() here; they should already be in the schema
	m_tHits.Reserve ( 1024 );
	return true;
}
// release the hit storage accumulated during indexing
void CSphSource_StringVector::Disconnect ()
{
	m_tHits.Reset();
}
// fetch one numeric attribute value (from the columnar set or from the docinfo row),
// clamp it to the attribute's declared bit width, and expose it as a stored-field
// blob: the bytes live in dTmpStorage and pAddedAttrs[iStoredAttr] points at them,
// so dTmpStorage must outlive the consumer of pAddedAttrs.
template <typename T>
static void StoreAttrValue ( const InsertDocData_c & tDoc, const CSphColumnInfo & tAttr, int iColumnarAttr, int iStoredAttr, VecTraits_T<BYTE> * pAddedAttrs, CSphVector<BYTE> & dTmpStorage )
{
	T tValue = 0;
	if ( tAttr.IsColumnar() )
		tValue = (T)tDoc.m_dColumnarAttrs[iColumnarAttr];
	else
		tValue = (T)sphGetRowAttr ( tDoc.m_tDoc.m_pDynamic, tAttr.m_tLocator );

	int iBits = tAttr.m_tLocator.m_iBitCount;
	if ( tAttr.m_eAttrType==SPH_ATTR_BOOL )
		tValue = tValue ? 1 : 0; // normalize bools to 0/1
	else
	{
		// mask off bits beyond the declared width; iBits==64 is special-cased
		// because 1ULL<<64 would be undefined behavior
		T uMask = iBits==64 ? (T)0xFFFFFFFFFFFFFFFFULL : (T)( (1ULL<<iBits)-1 );
		tValue &= uMask;
	}

	dTmpStorage.Resize ( sizeof(tValue) );
	memcpy ( dTmpStorage.Begin(), &tValue, dTmpStorage.GetLength() );
	pAddedAttrs[iStoredAttr] = dTmpStorage;
}
// append blobs for every attribute marked as stored to tStoredDoc.m_dFields.
// Walks the schema once, keeping four parallel counters in sync:
//   iStrAttr      - position in tDoc.m_dStrings (strings and json consume slots)
//   iMva          - offset into the MVA value stream
//   iColumnarAttr - position among columnar attributes
//   iStoredAttr   - output slot in pAddedAttrs
// dTmpAttrStorage supplies backing memory for converted values and must
// outlive tStoredDoc.
static void ProcessStoredAttrs ( DocstoreBuilder_i::Doc_t & tStoredDoc, const InsertDocData_c & tDoc, const CSphSchema & tSchema, CSphVector<CSphVector<BYTE>> & dTmpAttrStorage )
{
	int iNumStoredAttrs = 0;
	for ( int i = 0; i < tSchema.GetAttrsCount(); i++ )
		if ( tSchema.IsAttrStored(i) )
			iNumStoredAttrs++;

	if ( !iNumStoredAttrs )
		return;

	dTmpAttrStorage.Resize ( tSchema.GetAttrsCount() );
	VecTraits_T<BYTE> * pAddedAttrs = tStoredDoc.m_dFields.AddN ( iNumStoredAttrs );

	const char ** ppStr = tDoc.m_dStrings.Begin();
	int iStrAttr = 0;
	int iMva = 0;
	int iStoredAttr = 0;
	int iColumnarAttr = 0;
	for ( int i=0; i<tSchema.GetAttrsCount(); ++i )
	{
		const CSphColumnInfo & tAttr = tSchema.GetAttr(i);
		bool bStored = tSchema.IsAttrStored(i);

		switch ( tAttr.m_eAttrType )
		{
		case SPH_ATTR_STRING:
			{
				if ( bStored )
				{
					BYTE * pStr = ppStr ? (BYTE *) ppStr[iStrAttr] : nullptr;
					pAddedAttrs[iStoredAttr] = { pStr, pStr ? (int) strlen ((const char *) pStr ) : 0 };
				}
				iStrAttr++; // strings consume a slot whether stored or not
			}
			break;

		case SPH_ATTR_JSON:
			// json shares the string stream but must never be stored via this path
			iStrAttr++;
			if ( !bStored )
				break;
			assert ( 0 && "Internal error: stored json" );
			break;

		case SPH_ATTR_UINT32SET:
		case SPH_ATTR_INT64SET:
		case SPH_ATTR_FLOAT_VECTOR:
			{
				int iNumValues = 0;
				bool bDefault = false;
				const int64_t * pMva = tDoc.GetMVA(iMva);
				std::tie ( iNumValues, bDefault ) = tDoc.ReadMVALength(pMva);
				iMva += iNumValues+1; // +1 skips the length prefix

				if ( !bStored )
					break;

				if ( tAttr.m_eAttrType == SPH_ATTR_INT64SET )
					pAddedAttrs[iStoredAttr] = { (BYTE*)pMva, int(iNumValues*sizeof(int64_t)) };
				else
				{
					// source values arrive as 64-bit slots; narrow to 32 bits per element for storage
					dTmpAttrStorage[i].Resize ( iNumValues*sizeof(DWORD) );
					DWORD * pAttrs = (DWORD*)dTmpAttrStorage[i].Begin();
					for ( int iValue = 0; iValue < iNumValues; iValue++ )
						pAttrs[iValue] = (DWORD)pMva[iValue];

					pAddedAttrs[iStoredAttr] = dTmpAttrStorage[i];
				}
			}
			break;

		case SPH_ATTR_BIGINT:
			if ( bStored )
				StoreAttrValue<int64_t> ( tDoc, tAttr, iColumnarAttr, iStoredAttr, pAddedAttrs, dTmpAttrStorage[i] );
			break;

		default:
			if ( bStored )
				StoreAttrValue<DWORD> ( tDoc, tAttr, iColumnarAttr, iStoredAttr, pAddedAttrs, dTmpAttrStorage[i] );
			break;
		}

		if ( tAttr.IsColumnar() )
			iColumnarAttr++;

		if ( bStored )
			iStoredAttr++;
	}
}
// Collects the document's stored fields and stored attributes into tStoredDoc
// for the docstore. Returns nullptr when the schema stores nothing; otherwise
// returns &tStoredDoc. dTmpAttrStorage must outlive tStoredDoc (it backs the
// attribute views added by ProcessStoredAttrs).
DocstoreBuilder_i::Doc_t * RtIndex_c::FetchDocFields ( DocstoreBuilder_i::Doc_t & tStoredDoc, const InsertDocData_c & tDoc, CSphSource_StringVector & tSrc, CSphVector<CSphVector<BYTE>> & dTmpAttrStorage ) const
{
	if ( !m_tSchema.HasStoredFields() && !m_tSchema.HasStoredAttrs() )
		return nullptr;

	tSrc.GetDocFields ( tStoredDoc.m_dFields );
	assert ( tStoredDoc.m_dFields.GetLength()==m_tSchema.GetFieldsCount() );

	// filter out non-hl fields (should already be null)
	// iField only advances when a field is kept, so removal keeps the cursor valid
	int iField = 0;
	for ( int i = 0; i < m_tSchema.GetFieldsCount(); i++ )
	{
		if ( !m_tSchema.IsFieldStored(i) )
			tStoredDoc.m_dFields.Remove(iField);
		else
			iField++;
	}

	ProcessStoredAttrs ( tStoredDoc, tDoc, m_tSchema, dTmpAttrStorage );
	return &tStoredDoc;
}
// Validates that every KNN-indexed float-vector attribute in the document has
// exactly the number of dimensions the KNN index expects. Walks ALL MVA-family
// attrs (not just float vectors) to keep the packed-MVA cursor in sync.
// Returns false and sets sError on a dimension mismatch.
bool RtIndex_c::VerifyKNN ( InsertDocData_c & tDoc, CSphString & sError ) const
{
	int iMva = 0;
	for ( int i = 0; i < m_tSchema.GetAttrsCount(); i++ )
	{
		const CSphColumnInfo & tAttr = m_tSchema.GetAttr(i);
		if ( !IsMvaAttr ( tAttr.m_eAttrType ) )
			continue;

		int iNumValues = 0;
		bool bDefault = false;
		const int64_t * pMva = tDoc.GetMVA(iMva);
		std::tie ( iNumValues, bDefault ) = tDoc.ReadMVALength(pMva);
		iMva += iNumValues + 1; // skip length prefix + values

		if ( tAttr.m_eAttrType!=SPH_ATTR_FLOAT_VECTOR || !tAttr.IsIndexedKNN() )
			continue;

		// default (absent) values are allowed to skip the dimension check
		if ( !bDefault && iNumValues!=tAttr.m_tKNN.m_iDims )
		{
			sError.SetSprintf ( "KNN error: data has %d values, index '%s' needs %d values", iNumValues, tAttr.m_sName.cstr(), tAttr.m_tKNN.m_iDims );
			return false;
		}
	}

	return true;
}
// Full single-document insertion pipeline: docid generation / duplicate check,
// tokenizer setup (plugin filters, AOT, SPZ, HTML stripping), field iteration
// to produce hits, KNN validation, stored-field collection, and finally handoff
// to the accumulator via the hits-based AddDocument overload.
// Returns false and sets sError on any failure; sWarning collects non-fatal issues.
bool RtIndex_c::AddDocument ( InsertDocData_c & tDoc, bool bReplace, const CSphString & sTokenFilterOptions, CSphString & sError, CSphString & sWarning, RtAccum_t * pAcc )
{
	assert ( g_bRTChangesAllowed );
	assert ( m_tSchema.GetAttrIndex ( sphGetDocidName() )==0 );
	assert ( m_tSchema.GetAttr ( sphGetDocidName() )->m_eAttrType==SPH_ATTR_BIGINT );

	DocID_t tDocID = tDoc.GetID();

	// here is only point related to current index - generate unique autoID, or check that provided is not duplicate.
	if ( !tDocID || !bReplace )
	{
		auto tGuard = RtGuard();
		if ( !tDocID ) // docID wasn't provided, need to generate autoID
		{
			bReplace = false; // with absent docID we effectively fall to plain 'insert' - nothing to kill
			// keep generating until the id is not present in any live RAM segment
			do
				tDocID = UidShort ();
			while ( tGuard.m_dRamSegs.any_of (
					[tDocID] ( const ConstRtSegmentRefPtf_t & p ) { return p->FindAliveRow ( tDocID ); } ) );
			tDoc.SetID ( tDocID );
		} else
		{
			// docID was provided, but that is new insert and we need to check for duplicates
			assert ( !bReplace && tDocID!=0 );
			if ( tGuard.m_dRamSegs.any_of ( [tDocID] ( const ConstRtSegmentRefPtf_t & p ) { return p->FindAliveRow ( tDocID ); })
				|| tGuard.m_dDiskChunks.any_of ( [tDocID] ( const ConstDiskChunkRefPtr_t & p ) { return p->Cidx().IsAlive(tDocID); }))
			{
				sError.SetSprintf ( "duplicate id '" UINT64_FMT "'", tDocID );
				return false; // already exists and not deleted; INSERT fails
			}
		}
	}

	TokenizerRefPtr_c tTokenizer = CloneIndexingTokenizer();

	if (!tTokenizer)
	{
		sError.SetSprintf ( "internal error: no indexing tokenizer available" );
		return false;
	}

	MEMORY ( MEM_INDEX_RT );

	if ( !BindAccum ( pAcc, &sError ) )
		return false;

	tDoc.m_tDoc.m_tRowID = pAcc->GenerateRowID();

	// OPTIMIZE? do not create filter on each(!) INSERT
	if ( !m_tSettings.m_sIndexTokenFilter.IsEmpty() )
	{
		Tokenizer::AddPluginFilterTo ( tTokenizer, m_tSettings.m_sIndexTokenFilter, sError );
		if ( !sError.IsEmpty() )
			return false;

		if ( !tTokenizer->SetFilterSchema ( m_tSchema, sError ) )
			return false;

		if ( !sTokenFilterOptions.IsEmpty() )
			if ( !tTokenizer->SetFilterOptions ( sTokenFilterOptions.cstr(), sError ) )
				return false;
	}

	// OPTIMIZE? do not create filter on each(!) INSERT
	if ( m_tSettings.m_uAotFilterMask )
		sphAotTransformFilter ( tTokenizer, m_pDict, m_tSettings.m_bIndexExactWords, m_tSettings.m_uAotFilterMask );

	CSphSource_StringVector tSrc ( tDoc.m_dFields, m_tSchema );

	// SPZ setup
	if ( m_tSettings.m_bIndexSP && !tTokenizer->EnableSentenceIndexing ( sError ) )
		return false;

	if ( !m_tSettings.m_sZones.IsEmpty() && !tTokenizer->EnableZoneIndexing ( sError ) )
		return false;

	if ( m_tSettings.m_bHtmlStrip && !tSrc.SetStripHTML ( m_tSettings.m_sHtmlIndexAttrs.cstr(), m_tSettings.m_sHtmlRemoveElements.cstr(), m_tSettings.m_bIndexSP, m_tSettings.m_sZones.cstr(), sError ) )
		return false;

	// wire up the source: settings, tokenizer, dict, optional field filter
	tSrc.Setup ( m_tSettings, nullptr );
	tSrc.SetTokenizer ( std::move ( tTokenizer ) );
	tSrc.SetDict ( pAcc->m_pDict );

	// OPTIMIZE? do not clone filters on each INSERT
	if ( m_pFieldFilter )
		tSrc.SetFieldFilter ( m_pFieldFilter->Clone() );

	tSrc.SetMorphFields ( m_tMorphFields );

	if ( !tSrc.Connect ( m_sLastError ) )
		return false;

	m_tSchema.CloneWholeMatch ( tSrc.m_tDocInfo, tDoc.m_tDoc );

	bool bEOF = false;
	if ( !tSrc.IterateStart ( sError ) || !tSrc.IterateDocument ( bEOF, sError ) )
		return false;

	ISphHits * pHits = tSrc.IterateHits ( sError );
	pAcc->GrabLastWarning ( sWarning );

	if ( !VerifyKNN ( tDoc, sError ) )
		return false;

	// dTmpAttrStorage backs attribute views inside tStoredDoc; keep both alive together
	CSphVector<CSphVector<BYTE>> dTmpAttrStorage;
	DocstoreBuilder_i::Doc_t tStoredDoc;
	DocstoreBuilder_i::Doc_t * pStoredDoc = FetchDocFields ( tStoredDoc, tDoc, tSrc, dTmpAttrStorage );
	tDoc.m_iTotalBytes = tSrc.GetStats().m_iTotalBytes;

	return AddDocument ( pHits, tDoc, bReplace, pStoredDoc, sError, sWarning, pAcc );
}
/// Binds the accumulator to this index (setting up its dict on first bind).
/// Fails when the accumulator already belongs to a different index, or to
/// this index but with a stale schema. pError is optional.
bool RtIndex_i::PrepareAccum ( RtAccum_t* pAcc, bool bWordDict, CSphString* pError )
{
	assert ( pAcc );

	auto * pBoundIndex = pAcc->GetIndex();

	if ( pBoundIndex && pBoundIndex!=this )
	{
		if ( pError )
			pError->SetSprintf ( "current txn is working with another table ('%s')", pBoundIndex->GetName() );
		return false;
	}

	if ( pBoundIndex && pAcc->GetSchemaHash()!=GetSchemaHash() )
	{
		if ( pError )
			pError->SetSprintf ( "current txn is working with table's another schema ('%s'), restart session", pBoundIndex->GetName() );
		return false;
	}

	assert ( !pBoundIndex || pBoundIndex==this );
	if ( !pBoundIndex )
	{
		// first use in this txn: attach and set up the dictionary
		pAcc->SetIndex ( this );
		pAcc->SetupDict ( this, m_pDict, bWordDict );
	}

	return true;
}
// Thin wrapper over PrepareAccum that supplies this index's dictionary mode.
bool RtIndex_c::BindAccum ( RtAccum_t * pAccExt, CSphString * pError )
{
	return PrepareAccum ( pAccExt, m_bKeywordDict, pError );
}
/// Final insertion step: pushes the prepared hits, row data and (optional)
/// docstore payload into the accumulator. Returns false when no accumulator
/// is available.
bool RtIndex_c::AddDocument ( ISphHits * pHits, const InsertDocData_c & tDoc, bool bReplace, const DocstoreBuilder_i::Doc_t * pStoredDoc, CSphString & sError, CSphString & sWarning, RtAccum_t * pAccExt )
{
	assert ( g_bRTChangesAllowed );

	auto * pAcc = (RtAccum_t *)pAccExt;
	if ( !pAcc )
		return false;

	pAcc->AddDocument ( pHits, tDoc, bReplace, m_tSchema.GetRowSize(), pStoredDoc );
	return true;
}
// cook checkpoints - make NULL terminating strings from offsets
// (word checkpoints store byte offsets into the keyword blob in m_uWordID;
// this patches m_szWord to point at the real strings)
static void FixupSegmentCheckpoints ( RtSegment_t * pSeg )
{
	assert ( pSeg &&
		( !pSeg->m_dWordCheckpoints.GetLength() || pSeg->m_dKeywordCheckpoints.GetLength() ) );
	if ( pSeg->m_dWordCheckpoints.IsEmpty() )
		return;

	const char * pKeywordBase = (const char *)pSeg->m_dKeywordCheckpoints.Begin();
	assert ( pKeywordBase );
	for ( auto & tCP : pSeg->m_dWordCheckpoints )
		tCP.m_szWord = pKeywordBase + tCP.m_uWordID;
}
// Converts the accumulator's flat, sorted hit list into the segment's packed
// dictionary/doclist/hitlist streams. Relies on dAccum being sorted by
// (wordid, rowid, wordpos); a sentinel hit (WORDID_MAX) is appended so the
// final word/doc get flushed by the same transition logic.
// Single-hit documents embed the hit position directly in the doc entry
// (uEmbeddedHit) instead of writing a hitlist.
static void CreateSegmentHits ( RtAccum_t& tAcc, RtSegment_t * pSeg, int iWordsCheckpoint, ESphHitless eHitless, const VecTraits_T<SphWordID_t> & dHitlessWords )
{
	assert(pSeg);
	auto& dAccum = tAcc.m_dAccum;
	bool bKeywordDict = tAcc.m_bKeywordDict;

	// sentinel hit: forces the last real word/doc to be flushed
	CSphWordHit& tClosingHit = dAccum.Add();
	tClosingHit.m_uWordID = WORDID_MAX;
	tClosingHit.m_tRowID = INVALID_ROWID;
	tClosingHit.m_uWordPos = EMPTY_HIT;

	RtDoc_t tDoc;
	RtWord_t tWord;
	RtDocWriter_c tOutDoc ( pSeg->m_dDocs );
	RtWordWriter_c tOutWord ( pSeg->m_dWords, pSeg->m_dWordCheckpoints, pSeg->m_dKeywordCheckpoints, bKeywordDict, iWordsCheckpoint, eHitless );
	RtHitWriter_c tOutHit ( pSeg->m_dHits );

	// with a keyword dict, m_uWordID is an offset into the packed keywords blob
	const BYTE * pPacketBase = bKeywordDict ? tAcc.GetPackedKeywords() : nullptr;

	Hitpos_t uEmbeddedHit = EMPTY_HIT;
	Hitpos_t uPrevHit = EMPTY_HIT;

	for ( const CSphWordHit & tHit : dAccum )
	{
		// new keyword or doc; flush current doc
		if ( tHit.m_uWordID!=tWord.m_uWordID || tHit.m_tRowID!=tDoc.m_tRowID )
		{
			if ( tDoc.m_tRowID!=INVALID_ROWID )
			{
				++tWord.m_uDocs;
				tWord.m_uHits += tDoc.m_uHits;

				if ( uEmbeddedHit )
				{
					assert ( tDoc.m_uHits==1 );
					tDoc.m_uHit = uEmbeddedHit;
				}

				tOutDoc << tDoc;
				tDoc.m_uDocFields = 0;
				tDoc.m_uHits = 0;
				tDoc.m_uHit = tOutHit.WriterPos();
			}

			tDoc.m_tRowID = tHit.m_tRowID;
			tOutHit.ZipRestart ();
			uEmbeddedHit = EMPTY_HIT;
			uPrevHit = EMPTY_HIT;
		}

		// new keyword; flush current keyword
		if ( tHit.m_uWordID!=tWord.m_uWordID )
		{
			tOutDoc.ZipRestart ();
			if ( tWord.m_uWordID )
			{
				if ( bKeywordDict )
				{
					const BYTE * pPackedWord = pPacketBase + tWord.m_uWordID;
					assert ( pPackedWord[0] && pPackedWord[0]+1<tAcc.GetPackedLen() );
					tWord.m_sWord = pPackedWord;
				}
				tOutWord << tWord;
			}

			tWord.m_uWordID = tHit.m_uWordID;
			tWord.m_uDocs = 0;
			tWord.m_uHits = 0;
			tWord.m_uDoc = tOutDoc.WriterPos();
			uPrevHit = EMPTY_HIT;

			// decide whether this keyword keeps its hitlist under the hitless mode
			if ( eHitless==SPH_HITLESS_NONE || eHitless==SPH_HITLESS_ALL || !tWord.m_uWordID || tHit.m_uWordPos==EMPTY_HIT )
			{
				tWord.m_bHasHitlist = ( eHitless==SPH_HITLESS_NONE || !tWord.m_uWordID );
			} else
			{
				// SPH_HITLESS_SOME: look the real wordid up in the hitless-words list
				SphWordID_t tWordID = tWord.m_uWordID;
				if ( bKeywordDict && !dHitlessWords.IsEmpty() )
				{
					// packed keyword layout: [len byte][keyword bytes][real wordid]
					const BYTE * pPackedWord = pPacketBase + tWord.m_uWordID;
					DWORD uLen = pPackedWord[0];
					assert ( uLen && (int)uLen+1<tAcc.GetPackedLen() );
					memcpy ( &tWordID, pPackedWord + uLen + 1, sizeof ( tWordID ) );
				}
				tWord.m_bHasHitlist = ( dHitlessWords.BinarySearch ( tWordID )==nullptr );
			}
		}

		// might be a duplicate
		if ( uPrevHit==tHit.m_uWordPos )
			continue;

		// just a new hit
		if ( !tWord.m_bHasHitlist )
		{
			if ( !tDoc.m_uHits )
				uEmbeddedHit = tHit.m_uWordPos;
			tDoc.m_uHits = 1; // FIXME!!! hitless hit-count always 1
		} else if ( !tDoc.m_uHits )
		{
			// first hit of the doc: keep it embedded until a second one shows up
			uEmbeddedHit = tHit.m_uWordPos;
			++tDoc.m_uHits;
		} else
		{
			// second+ hit: spill the embedded one into the hitlist first
			if ( uEmbeddedHit )
				tOutHit << std::exchange ( uEmbeddedHit, 0 );

			tOutHit << tHit.m_uWordPos;
			++tDoc.m_uHits;
		}
		uPrevHit = tHit.m_uWordPos;

		const int iField = HITMAN::GetField ( tHit.m_uWordPos );
		if ( iField<32 )
			tDoc.m_uDocFields |= ( 1UL<<iField );
	}
}
// Builds a new RAM segment from an accumulator's collected documents: packs
// hits into dictionary/doclist/hitlist streams, then moves rows, blobs,
// docstore and columnar data out of the accumulator into the segment.
// Returns nullptr when the accumulator holds no documents.
// NOTE(review): sError is currently unused by this function.
RtSegment_t * CreateSegment ( RtAccum_t* pAcc, int iWordsCheckpoint, ESphHitless eHitless, const VecTraits_T<SphWordID_t> & dHitlessWords, CSphString & sError )
{
	TRACE_CONN ( "conn", "CreateSegment" );
	assert ( pAcc );
	if ( !pAcc->m_uAccumDocs )
		return nullptr;

	MEMORY ( MEM_RT_ACCUM );
	auto * pSeg = new RtSegment_t ( pAcc->m_uAccumDocs, pAcc->GetIndex()->GetInternalSchema() );
	// freshly created segment: fake lock just mutes thread-safety analysis
	FakeWL_t tFakeLock {pSeg->m_tLock};

	CreateSegmentHits ( *pAcc, pSeg, iWordsCheckpoint, eHitless, dHitlessWords );

	if ( pAcc->m_bKeywordDict )
		FixupSegmentCheckpoints(pSeg);

	// transfer (not copy) accumulated data into the segment
	pSeg->m_dRows.SwapData ( pAcc->m_dAccumRows );
	pSeg->m_dBlobs.SwapData ( pAcc->m_dBlobs) ;
	std::swap ( pSeg->m_pDocstore, pAcc->m_pDocstore );
	if ( pAcc->m_pColumnarBuilder )
	{
		assert( pAcc->m_pIndex);
		pSeg->m_pColumnar = CreateColumnarRT ( pAcc->m_pIndex->GetInternalSchema(), pAcc->m_pColumnarBuilder.get() );
	}
	pSeg->BuildDocID2RowIDMap ( pAcc->m_pIndex->GetInternalSchema() );
	pAcc->m_tNextRowID = 0;

	return pSeg;
}
// Copies one keyword's doclist (and hit blobs) from a source segment into the
// destination segment being merged, remapping row ids via dRowMap and skipping
// rows mapped to INVALID_ROWID (killed docs). Updates tDstWord doc/hit counters.
void RtIndex_c::CopyWord ( RtSegment_t& tDstSeg, RtWord_t& tDstWord, RtDocWriter_c& tDstDoc, const RtSegment_t& tSrcSeg, const RtWord_t* pSrcWord, const VecTraits_T<RowID_t>& dRowMap )
{
	RtDocReader_c tSrcDocs ( &tSrcSeg, *pSrcWord );

	// copy docs
	while ( tSrcDocs.UnzipDoc() )
	{
		auto tNewRowID = dRowMap[tSrcDocs->m_tRowID];
		if ( tNewRowID==INVALID_ROWID )
			continue; // row was killed, drop its posting

		RtDoc_t tDoc = *tSrcDocs;
		tDoc.m_tRowID = tNewRowID;

		++tDstWord.m_uDocs;
		tDstWord.m_uHits += tSrcDocs->m_uHits;

		// single-hit docs keep the hit embedded in m_uHit; only multi-hit docs
		// have an actual hitlist blob that must be copied and re-pointed
		if ( tSrcDocs->m_uHits!=1 )
		{
			tDoc.m_uHit = tDstSeg.m_dHits.GetLength ();
			tDstSeg.m_dHits.Append ( GetHitsBlob ( tSrcSeg, *tSrcDocs ) );

			// this is reference of what append (hitsblob) above does.
			// RtHitWriter_c tOutHit ( &tDst );
			// tDoc.m_uHit = tOutHit.ZipHitPos();
			// for ( DWORD uValue=tInHit.UnzipHit(); uValue; uValue=tInHit.UnzipHit() )
			//	tOutHit.ZipHit ( uValue );
		}

		// copy doc
		tDstDoc << tDoc;
	}
}
// Helper for field deletion: examines one hit, drops it if it belongs to the
// killed field, shifts the field number down for fields past the killed one,
// updates the doc's hit count / field mask, and hands the (possibly re-numbered)
// hit to fnProcessor for actual output.
template<typename FN>
inline void ProcessField ( RtDoc_t & tOutDoc, DWORD uHit, int iKillField, FN&& fnProcessor )
{
	assert ( iKillField >=0 );

	int iField = HITMAN::GetField ( uHit );
	if ( iKillField==iField )
		return; // hit belongs to the deleted field, drop it

	if ( iField>iKillField )
	{
		// renumber: fields after the deleted one shift down by one
		--iField;
		HITMAN::DecrementField ( uHit );
	}

	++tOutDoc.m_uHits;
	if ( iField<32 )
		tOutDoc.m_uDocFields |= ( 1UL << iField );

	fnProcessor ( uHit );
}
// Rewrites one keyword's doclist, dropping all hits that belong to iKillField
// and renumbering the remaining fields. Docs left with zero hits are skipped
// entirely; tOutWord doc/hit counters are updated for surviving docs.
static void CopyWordWithoutField ( CSphTightVector<BYTE> * pOutHits, RtDocWriter_c & tOutDocs, RtWord_t & tOutWord, const RtSegment_t & tSrc, RtDocReader_c & tInDocs, int iKillField )
{
	assert ( iKillField>=0 );

	while ( tInDocs.UnzipDoc() )
	{
		RtDoc_t tOutDoc = *tInDocs;
		tOutDoc.m_uHits = tOutDoc.m_uDocFields = 0;

		if ( tInDocs->m_uHits!=1 )
		{
			// multi-hit doc: filter its hitlist into the new hits stream
			RtHitReader_c tInHits ( tSrc, *tInDocs );
			RtHitWriter_c tOutHits ( *pOutHits );
			tOutDoc.m_uHit = tOutHits.WriterPos();
			while ( tInHits.UnzipHit() )
				ProcessField ( tOutDoc, *tInHits, iKillField, [&tOutHits] ( Hitpos_t x ) { tOutHits << x; } );
		} else
			// single-hit doc: the hit is embedded in m_uHit itself
			ProcessField ( tOutDoc, tOutDoc.m_uHit, iKillField, [&tOutDoc] ( Hitpos_t x ) { tOutDoc.m_uHit = x; } );

		// all hits belonged to the killed field - drop the whole doc
		if ( !tOutDoc.m_uHits )
			continue;

		tOutDocs << tOutDoc;
		++tOutWord.m_uDocs;
		tOutWord.m_uHits += tOutDoc.m_uHits;
	}
}
// Rebuilds a RAM segment's full-text data (dictionary, doclists, hitlists,
// checkpoints, infix filters) with all hits of field iKillField removed and
// subsequent fields renumbered. Keywords whose docs all vanish are dropped.
void RtIndex_c::DeleteFieldFromDict ( RtSegment_t * pSeg, int iKillField )
{
	assert ( iKillField>=0 );

	// fresh streams to rebuild into; swapped into the segment at the end
	CSphTightVector<BYTE> dWords;
	CSphVector<RtWordCheckpoint_t> dWordCheckpoints;
	CSphTightVector<BYTE> dDocs;
	CSphTightVector<BYTE> dHits;
	CSphVector<BYTE> dKeywordCheckpoints;

	const RtSegment_t & tInSeg = *pSeg;

	dWords.Reserve ( tInSeg.m_dWords.GetLength () );
	dDocs.Reserve ( tInSeg.m_dDocs.GetLength () );
	dHits.Reserve ( tInSeg.m_dHits.GetLength () );

	RtWordWriter_c tOutWords ( dWords, dWordCheckpoints, dKeywordCheckpoints, m_bKeywordDict, m_iWordsCheckpoint, m_tSettings.m_eHitless );
	RtWordReader_c tInWord ( &tInSeg, m_bKeywordDict, m_iWordsCheckpoint, m_tSettings.m_eHitless );
	RtWord_t tOutWord;

	while ( tInWord.UnzipWord() )
	{
		tOutWord = *tInWord;
		tOutWord.m_uDocs = tOutWord.m_uHits = 0;

		RtDocWriter_c tOutDocs ( dDocs );
		tOutWord.m_uDoc = tOutDocs.WriterPos();

		RtDocReader_c tInDocs ( &tInSeg, *tInWord );
		CopyWordWithoutField ( &dHits, tOutDocs, tOutWord, tInSeg, tInDocs, iKillField );

		// append word to the dictionary (only if any docs survived)
		if ( tOutWord.m_uDocs )
			tOutWords << tOutWord;
	}

	// swap data to refreshed
	RtSegment_t & tOutSeg = *pSeg;
	tOutSeg.m_dWords.SwapData ( dWords );
	tOutSeg.m_dWordCheckpoints.SwapData ( dWordCheckpoints );
	tOutSeg.m_dDocs.SwapData ( dDocs );
	tOutSeg.m_dHits.SwapData ( dHits );
	tOutSeg.m_dKeywordCheckpoints.SwapData ( dKeywordCheckpoints );

	// checkpoint offsets must be re-pointed into the new keyword blob
	if ( m_bKeywordDict )
		FixupSegmentCheckpoints ( pSeg );

	BuildSegmentInfixes ( &tOutSeg, m_pDict->HasMorphology (), m_bKeywordDict, m_tSettings.m_iMinInfixLen, m_iWordsCheckpoint, ( m_iMaxCodepointLength>1 ), m_tSettings.m_eHitless );
}
// iterate over alive rows range
// Lightweight range adaptor over a segment's rows that skips rows marked dead
// in the segment's dead-row map; usable in range-for:
//   for ( auto tRowID : RtLiveRows_c(tSeg) ) ...
// Holds a reference into the segment; the segment must outlive the range.
class RtLiveRows_c
{
public:
	// forward iterator yielding RowID_t of alive rows
	class Iterator_c
	{
	public:
		// bBegin selects begin (first alive row) vs end (one-past-last row id)
		Iterator_c ( const RtLiveRows_c & tOwner, bool bBegin )
			: m_tOwner { tOwner }
			, m_tRowID { bBegin ? tOwner.FirstAliveRow () : tOwner.EndRow() }
		{}

		RowID_t operator*() const { return m_tRowID; };
		bool operator!= ( const Iterator_c & rhs ) const { return m_tRowID!=rhs.m_tRowID; }

		// advance to the next alive row (may jump several dead rows)
		Iterator_c & operator++ ()
		{
			m_tRowID = m_tOwner.NextAliveRow(m_tRowID);
			return *this;
		}

	private:
		const RtLiveRows_c & m_tOwner;
		RowID_t m_tRowID = 0;
	};

	explicit RtLiveRows_c ( const RtSegment_t & tSeg )
		: m_tRowIDMax ( tSeg.m_uRows )
		, m_tDeadRowMap ( tSeg.m_tDeadRowMap )
	{}

	// c++11 style iteration
	Iterator_c begin () const { return { *this, true }; }
	Iterator_c end() const { return { *this, false }; }

private:
	RowID_t m_tRowID = 0;
	RowID_t m_tRowIDMax = 0;
	const DeadRowMap_Ram_c & m_tDeadRowMap;

	// advance tRowID past any rows flagged dead, capped at m_tRowIDMax
	RowID_t SkipDeadRows ( RowID_t tRowID ) const
	{
		while ( tRowID<m_tRowIDMax && m_tDeadRowMap.IsSet(tRowID) )
			++tRowID;

		return tRowID;
	}

	RowID_t FirstAliveRow() const { return SkipDeadRows(m_tRowID); }
	RowID_t EndRow() const { return m_tRowIDMax; }
	RowID_t NextAliveRow ( RowID_t tRowID ) const { return SkipDeadRows ( tRowID+1 ); }
};
// Hashes every infix substring of iInfixCodepointCount codepoints from sWord
// into a bloom filter via the BLOOM_TRAITS policy (generation sets bits,
// checking tests them and may stop early via IterateNext). For UTF-8 input,
// a codepoint-to-byte offset table is built first so infixes are taken on
// codepoint boundaries. Returns false if the word is shorter than the infix
// length (in bytes or codepoints).
template <typename BLOOM_TRAITS>
inline bool BuildBloom_T ( const BYTE * sWord, int iLen, int iInfixCodepointCount, bool bUtf8, int iKeyValCount, BLOOM_TRAITS & tBloom )
{
	if ( iLen<iInfixCodepointCount )
		return false;

	// byte offset for each codepoints
	// (pre-filled with the identity mapping, which is correct for SBCS)
	std::array<BYTE, SPH_MAX_WORD_LEN+1> dOffsets { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
		20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42 };

	assert ( iLen<=SPH_MAX_WORD_LEN || ( bUtf8 && iLen<=SPH_MAX_WORD_LEN*3 ) );
	int iCodes = iLen;
	if ( bUtf8 )
	{
		// build an offsets table into the bytestring
		iCodes = 0;
		const BYTE * s = sWord;
		const BYTE * sEnd = sWord + iLen;
		while ( s<sEnd )
		{
			int iCodepoints = sphUtf8CharBytes ( *s );
			assert ( iCodepoints>=1 && iCodepoints<=4 );
			dOffsets[iCodes+1] = dOffsets[iCodes] + (BYTE)iCodepoints;
			s += iCodepoints;
			iCodes++;
		}
	}
	if ( iCodes<iInfixCodepointCount )
		return false;

	// slide a window of iInfixCodepointCount codepoints; FNV-hash each infix,
	// fold to 32 bits, and map to one bit of the iKeyValCount*64-bit filter
	int iKeyBytes = iKeyValCount * 64;
	for ( int i=0; i<=iCodes-iInfixCodepointCount && tBloom.IterateNext(); i++ )
	{
		int iFrom = dOffsets[i];
		int iTo = dOffsets[i+iInfixCodepointCount];
		uint64_t uHash64 = sphFNV64 ( sWord+iFrom, iTo-iFrom );

		uHash64 = ( uHash64>>32 ) ^ ( (DWORD)uHash64 );
		int iByte = (int)( uHash64 % iKeyBytes );
		int iPos = iByte/64;
		uint64_t uVal = U64C(1) << ( iByte % 64 );

		tBloom.Set ( iPos, uVal );
	}
	return true;
}
// explicit instantiations
// Checking variant: tests bloom filter bits (may terminate early via IterateNext).
bool BuildBloom ( const BYTE * sWord, int iLen, int iInfixCodepointCount, bool bUtf8, int iKeyValCount
	, BloomCheckTraits_t &tBloom )
{
	return BuildBloom_T ( sWord, iLen, iInfixCodepointCount, bUtf8, iKeyValCount, tBloom );
}
// Generating variant: sets bloom filter bits for every infix of the word.
bool BuildBloom ( const BYTE * sWord, int iLen, int iInfixCodepointCount, bool bUtf8, int iKeyValCount
	, BloomGenTraits_t &tBloom )
{
	return BuildBloom_T ( sWord, iLen, iInfixCodepointCount, bUtf8, iKeyValCount, tBloom );
}
// (Re)builds the per-checkpoint infix bloom filters for a RAM segment, used to
// quickly skip dictionary checkpoint blocks during infix searches. Only applies
// to keyword-dict segments with infix indexing enabled; otherwise a no-op.
// Each checkpoint gets two filters (2-codepoint and 1-codepoint n-grams).
void BuildSegmentInfixes ( RtSegment_t * pSeg, bool bHasMorphology, bool bKeywordDict, int iMinInfixLen, int iWordsCheckpoint, bool bUtf8, ESphHitless eHitlessMode )
{
	if ( !pSeg || !bKeywordDict || !iMinInfixLen )
		return;

	// one filter pair per checkpoint block (+1 for words before the first checkpoint)
	int iBloomSize = ( pSeg->m_dWordCheckpoints.GetLength()+1 ) * BLOOM_PER_ENTRY_VALS_COUNT * BLOOM_HASHES_COUNT;
	pSeg->m_dInfixFilterCP.Resize ( iBloomSize );
	// reset filters
	pSeg->m_dInfixFilterCP.Fill ( 0 );

	uint64_t * pRough = pSeg->m_dInfixFilterCP.Begin();
	RtWordReader_c rdDictRough ( pSeg, true, iWordsCheckpoint, eHitlessMode );
	while ( rdDictRough.UnzipWord() )
	{
		// packed keyword: [len byte][keyword bytes]
		const BYTE * pDictWord = rdDictRough->m_sWord+1;
		// with morphology, only index the non-stemmed variants
		if ( bHasMorphology && *pDictWord!=MAGIC_WORD_HEAD_NONSTEMMED )
			continue;

		int iLen = rdDictRough->m_sWord[0];
		if ( *pDictWord<0x20 ) // anyway skip heading magic chars in the prefix, like NONSTEMMED maker
		{
			pDictWord++;
			iLen--;
		}

		uint64_t * pVal = pRough + rdDictRough.Checkpoint() * BLOOM_PER_ENTRY_VALS_COUNT * BLOOM_HASHES_COUNT;
		BloomGenTraits_t tBloom0 ( pVal );
		BloomGenTraits_t tBloom1 ( pVal+BLOOM_PER_ENTRY_VALS_COUNT );
		BuildBloom ( pDictWord, iLen, BLOOM_NGRAM_0, bUtf8, BLOOM_PER_ENTRY_VALS_COUNT, tBloom0 );
		BuildBloom ( pDictWord, iLen, BLOOM_NGRAM_1, bUtf8, BLOOM_PER_ENTRY_VALS_COUNT, tBloom1 );
	}
}
// Merge helper: copies rows, blobs, columnar values and docstore entries of all
// alive docs from tSrcSeg into tDstSeg, assigning fresh sequential row ids via
// tCtx.m_tResultRowID. Returns a map old-rowid -> new-rowid (INVALID_ROWID for
// killed rows). Must run in the serial chunk-access fiber (see REQUIRES).
CSphFixedVector<RowID_t> RtIndex_c::CopyAttributesFromAliveDocs ( RtSegment_t & tDstSeg, const RtSegment_t & tSrcSeg, RtAttrMergeContext_t & tCtx ) const REQUIRES ( tDstSeg.m_tLock ) REQUIRES (m_tWorkers.SerialChunkAccess())
{
	CSphFixedVector<RowID_t> dRowMap { tSrcSeg.m_uRows };
	dRowMap.Fill ( INVALID_ROWID );

	// mark us busy - updates to this seg will be collected.
	tSrcSeg.m_bAttrsBusy.store ( true, std::memory_order_release );

	// perform merging attrs in single fiber - that eliminates concurrency with optimize
//	ScopedScheduler_c tSerialFiber { m_tWorkers.SerialChunkAccess() };
	TRACE_SCHED ( "rt", "CopyAttrs" );

	auto dColumnarIterators = CreateAllColumnarIterators ( tSrcSeg.m_pColumnar.get(), m_tSchema );
	CSphVector<int64_t> dTmp;

	const CSphColumnInfo * pBlobRowLocator = m_tSchema.GetAttr ( sphGetBlobLocatorName() );

	// ensure no update will break (resize) blob when we're merging
	SccRL_t rLock ( tSrcSeg.m_tLock );
	for ( auto tRowID : RtLiveRows_c(tSrcSeg) )
	{
		// copy the fixed-size row verbatim, then fix up the blob offset
		CSphRowitem * pRow = tSrcSeg.m_dRows.Begin() + (int64_t)tRowID * m_iStride;
		auto pNewRow = tDstSeg.m_dRows.AddN ( m_iStride );
		memcpy ( pNewRow, pRow, m_iStride*sizeof(CSphRowitem) );
		if ( tCtx.m_iNumBlobs )
		{
			assert ( pBlobRowLocator ) ;
			int64_t iOldOffset = sphGetRowAttr ( pRow , pBlobRowLocator->m_tLocator );
			int64_t iNewOffset = sphCopyBlobRow ( tDstSeg.m_dBlobs, tSrcSeg.m_dBlobs, iOldOffset, tCtx.m_iNumBlobs );
			sphSetRowAttr ( pNewRow, pBlobRowLocator->m_tLocator, iNewOffset );
		}

		// replay columnar values into the destination builder
		ARRAY_FOREACH ( i, dColumnarIterators )
		{
			auto & tIt = dColumnarIterators[i];
			SetColumnarAttr ( i, tIt.second, tCtx.m_pColumnarBuilder, tIt.first, tRowID, dTmp );
		}

		if ( tDstSeg.m_pDocstore )
			tDstSeg.m_pDocstore->AddPackedDoc ( tCtx.m_tResultRowID, tSrcSeg.m_pDocstore.get(), tRowID );

		dRowMap[tRowID] = tCtx.m_tResultRowID++;
	}
	return dRowMap;
}
/// Three-way comparison of two dictionary entries during a merge.
/// A null entry sorts after everything (its stream is exhausted); with a
/// keyword dict the packed strings are compared, otherwise the numeric ids.
int RtIndex_c::CompareWords ( const RtWord_t * pWord1, const RtWord_t * pWord2 ) const
{
	if ( !pWord1 )
		return pWord2 ? 1 : 0;

	if ( !pWord2 )
		return -1;

	if ( m_bKeywordDict )
		return sphDictCmpStrictly ( (const char *)pWord1->m_sWord+1, *pWord1->m_sWord, (const char *)pWord2->m_sWord+1, *pWord2->m_sWord );

	// crc dict: plain numeric three-way compare of word ids
	if ( pWord1->m_uWordID==pWord2->m_uWordID )
		return 0;

	return ( pWord1->m_uWordID<pWord2->m_uWordID ) ? -1 : 1;
}
// Merges the dictionaries of two segments into tSeg: walks both sorted word
// streams in lockstep, copying doclists (row ids remapped via dRowMap1/2) and
// merging both doclists when the same keyword occurs in both segments.
// Words whose docs were all killed are not written out.
void RtIndex_c::MergeKeywords ( RtSegment_t & tSeg, const RtSegment_t & tSeg1, const RtSegment_t & tSeg2,
		const VecTraits_T<RowID_t> & dRowMap1, const VecTraits_T<RowID_t> & dRowMap2 ) const
{
	tSeg.m_dWords.Reserve ( Max ( tSeg1.m_dWords.GetLength(), tSeg2.m_dWords.GetLength() ) );
	tSeg.m_dDocs.Reserve ( Max ( tSeg1.m_dDocs.GetLength(), tSeg2.m_dDocs.GetLength() ) );
	tSeg.m_dHits.Reserve ( Max ( tSeg1.m_dHits.GetLength(), tSeg2.m_dHits.GetLength() ) );

	RtDocWriter_c tOutDoc ( tSeg.m_dDocs );
	RtWordWriter_c tOut ( tSeg.m_dWords, tSeg.m_dWordCheckpoints, tSeg.m_dKeywordCheckpoints, m_bKeywordDict, m_iWordsCheckpoint, m_tSettings.m_eHitless );

	RtWordReader_c tIn1 ( &tSeg1, m_bKeywordDict, m_iWordsCheckpoint, m_tSettings.m_eHitless );
	RtWordReader_c tIn2 ( &tSeg2, m_bKeywordDict, m_iWordsCheckpoint, m_tSettings.m_eHitless );
	const RtWord_t* pWords1 = tIn1.UnzipWord();
	const RtWord_t* pWords2 = tIn2.UnzipWord();

	// classic two-way merge; CompareWords() treats an exhausted (null) stream
	// as greater than anything, so the loop drains whichever stream remains
	while ( pWords1 || pWords2 )
	{
		int iCmp = CompareWords ( pWords1, pWords2 );
		RtWord_t tWord = iCmp<=0 ? *pWords1 : *pWords2;
		tWord.m_uDocs = 0;
		tWord.m_uHits = 0;
		tWord.m_uDoc = tOutDoc.WriterPos();

		// if words are equal, copy both
		if ( iCmp<=0 )
			CopyWord ( tSeg, tWord, tOutDoc, tSeg1, pWords1, dRowMap1 );
		if ( iCmp>=0 )
			CopyWord ( tSeg, tWord, tOutDoc, tSeg2, pWords2, dRowMap2 );

		// append non-empty word to the dictionary
		if ( tWord.m_uDocs )
			tOut << tWord;

		// move forward. Beware, tWord refer to static buffer inside tIn1 or tIn2, so call UnzipWord() before storing the word is NOT safe!
		if ( iCmp <= 0 )
			pWords1 = tIn1.UnzipWord(); // move forward
		if ( iCmp >= 0 )
			pWords2 = tIn2.UnzipWord(); // move forward

		tOutDoc.ZipRestart();
	}
}
// it seems safe to kill documents directly during merging.
// already killed will not come to the merged.
// killed after pass of merge attributes will survive, and need to be killed finally by separate killmulti
//
// Merges two RAM segments into a freshly allocated one: first attributes
// (rows/blobs/columnar/docstore, building rowid remaps), then the full-text
// data via MergeKeywords. Returns nullptr when every row of both inputs was
// killed. Must run in the serial chunk-access fiber.
RtSegment_t* RtIndex_c::MergeTwoSegments ( const RtSegment_t* pA, const RtSegment_t* pB ) const REQUIRES (m_tWorkers.SerialChunkAccess())
{
	////////////////////
	// merge attributes
	////////////////////
	RTRLOG << "MergeTwoSegments invoked";

	int nBlobAttrs = 0;
	for ( int i = 0; i < m_tSchema.GetAttrsCount(); ++i )
		if ( sphIsBlobAttr ( m_tSchema.GetAttr(i) ) )
			++nBlobAttrs;

	RowID_t tNextRowID = 0;
	// remember pre-merge consistency; only report new breakage caused by the merge itself
	bool bBothConsistent = CheckSegmentConsistency ( pA ) && CheckSegmentConsistency ( pB );
	auto pColumnarBuilder = CreateColumnarBuilderRT(m_tSchema);
	RtAttrMergeContext_t tCtx ( nBlobAttrs, tNextRowID, pColumnarBuilder.get() );

	auto * pSeg = new RtSegment_t (0, m_tSchema);
	FakeWL_t _ { pSeg->m_tLock }; // as pSeg is just created - we don't need real guarding and use fake lock to mute thread safety warnings

	assert ( !!pA->m_pDocstore==!!pB->m_pDocstore );
	if ( ( m_tSchema.HasStoredFields() || m_tSchema.HasStoredAttrs() ) && pA->m_pDocstore && pB->m_pDocstore )
		pSeg->SetupDocstore ( &m_tSchema );

	// we might need less because of killed, but we can not know yet. Reserving more than necessary is strictly not desirable!
	pSeg->m_dRows.Reserve ( m_iStride * Max ( pA->m_tAliveRows.load ( std::memory_order_relaxed ), pB->m_tAliveRows.load (std::memory_order_relaxed ) ) );
	pSeg->m_dBlobs.Reserve ( [pA, pB]() NO_THREAD_SAFETY_ANALYSIS { return Max ( pA->m_dBlobs.GetLength(), pB->m_dBlobs.GetLength() ); }() );

	CSphFixedVector<RowID_t> dRowMapA = CopyAttributesFromAliveDocs ( *pSeg, *pA, tCtx );
	CSphFixedVector<RowID_t> dRowMapB = CopyAttributesFromAliveDocs ( *pSeg, *pB, tCtx );

	assert ( tNextRowID<=INT_MAX );
	pSeg->m_uRows = tNextRowID;
	pSeg->m_tAliveRows.store ( pSeg->m_uRows, std::memory_order_relaxed );
	pSeg->m_tDeadRowMap.Reset ( pSeg->m_uRows );
	pSeg->m_pColumnar = CreateColumnarRT ( m_tSchema, pColumnarBuilder.get() );

	RTRLOG << "MergeTwoSegments: new seg has " << pSeg->m_uRows << " rows";

	// merged segment might be completely killed by committed data
	if ( !pSeg->m_uRows )
	{
		SafeRelease ( pSeg );
		return nullptr;
	}

	assert ( pSeg->GetStride() == m_iStride );
	pSeg->BuildDocID2RowIDMap ( m_tSchema );

	// now merge the full-text streams using the rowid remaps built above
	MergeKeywords ( *pSeg, *pA, *pB, dRowMapA, dRowMapB );

	if ( m_bKeywordDict )
		FixupSegmentCheckpoints ( pSeg );

	BuildSegmentInfixes ( pSeg, m_pDict->HasMorphology(), m_bKeywordDict, m_tSettings.m_iMinInfixLen, m_iWordsCheckpoint, ( m_iMaxCodepointLength>1 ), m_tSettings.m_eHitless );

	assert ( pSeg->m_uRows );
	assert ( pSeg->m_tAliveRows==pSeg->m_uRows );

	if ( bBothConsistent && !CheckSegmentConsistency ( pSeg, false ) )
		DumpMerge ( pA, pB, pSeg );

	return pSeg;
}
// Machinery to collect postponed attribute updates from several chunks/segments
// and serialize them into a single ordered list. Updates sharing the same
// CSphAttrUpdate are coalesced (their row lists concatenated); an update may
// only be emitted once no other per-source queue still has a DIFFERENT update
// that also references it earlier (see CanBePeeked).
namespace GatherUpdates {

	struct UpdHashFn { static inline uintptr_t Hash ( const CSphAttrUpdate* pK ) { return (uintptr_t) pK; } };
	using HashedUpd_t = CSphOrderedHash<PostponedUpdate_t*, AttrUpdateSharedPtr_t, UpdHashFn, 256>;

	// an update at the head of queue i may be emitted only if no OTHER queue
	// still contains that same update somewhere behind a different head
	inline bool CanBePeeked (int i, const CSphVector<HashedUpd_t>& dUpdates, const CSphVector<HashedUpd_t::Iterator_c>& dHeads) noexcept
	{
		ARRAY_CONSTFOREACH ( j, dHeads )
			if ( dHeads[i]->first!=dHeads[j]->first && dUpdates[j].Exists ( dHeads[i]->first ) )
				return false;
		return true;
	};

	// emit the update at the head of queue i, merging in (and consuming) the
	// matching head of every other queue; exhausted queues are removed from dHeads
	// NOTE(review): RemoveFast(j--) reorders dHeads while dUpdates keeps its
	// original order, so subsequent dHeads[j]==dUpdates[j].end() checks compare
	// mismatched indices once a queue has been removed — verify upstream intent.
	inline void Peek ( int i, const CSphVector<HashedUpd_t> & dUpdates, CSphVector<HashedUpd_t::Iterator_c> & dHeads, CSphVector<PostponedUpdate_t> & dSerializedUpdates )
	{
		auto a = dHeads[i]->first;
		auto& tNewUpdate = dSerializedUpdates.Add();
		tNewUpdate.m_pUpdate = a;
		ARRAY_FOREACH ( j, dHeads )
			if ( a==dHeads[j]->first )
			{
				tNewUpdate.m_dRowsToUpdate.Append ( dHeads[j]->second->m_dRowsToUpdate );
				dHeads[j]->second->m_pUpdate = nullptr;
				dHeads[j]->second->m_dRowsToUpdate.Reset();
				++dHeads[j];
				if ( dHeads[j]==dUpdates[j].end() )
					dHeads.RemoveFast (j--);
			}
	};

	const VecTraits_T<PostponedUpdate_t>& AccessPostponedUpdates ( const ConstRtSegmentRefPtf_t& pSeg ) { return pSeg->m_dPostponedUpdates; }
	const VecTraits_T<PostponedUpdate_t>& AccessPostponedUpdates ( const ConstDiskChunkRefPtr_t& pChunk ) { return pChunk->Cidx ().m_dPostponedUpdates; }

	// gathers postponed updates from every chunk/segment into one serialized,
	// dependency-respecting list; source updates are consumed (nulled) in place
	template<typename CHUNK_OR_SEG>
	CSphVector<PostponedUpdate_t> FromChunksOrSegments ( VecTraits_T<CHUNK_OR_SEG> dChunksOrSegments )
	{
		CSphVector<PostponedUpdate_t> dResult;
		CSphVector<HashedUpd_t> dUpdates;

		// one hash (ordered queue) per source that actually has postponed updates
		for ( const auto& dSeg : dChunksOrSegments )
		{
			const VecTraits_T<PostponedUpdate_t>& dPostponedUpdates = AccessPostponedUpdates (dSeg);
			if ( dPostponedUpdates.IsEmpty () )
				continue;

			auto& dHash = dUpdates.Add();
			for ( auto& tPostponedUpdate : dPostponedUpdates )
				dHash.Add ( &tPostponedUpdate, tPostponedUpdate.m_pUpdate );
		}

		if ( dUpdates.IsEmpty() )
			return dResult;

		CSphVector<HashedUpd_t::Iterator_c> dHeads ( dUpdates.GetLength() );
		ARRAY_CONSTFOREACH ( i, dUpdates )
			dHeads[i] = dUpdates[i].begin();

		// repeatedly find an emittable head and serialize it until all queues drain
		while (!dHeads.IsEmpty())
			ARRAY_FOREACH ( i, dHeads )
				if ( CanBePeeked ( i, dUpdates, dHeads ) )
				{
					Peek ( i, dUpdates, dHeads, dResult );
					break;
				}

		return dResult;
	}
}; // namespace
// that is 2-nd part of postponed updates. We may have one or several update set, stored from old segments.
// Replays postponed attribute updates against this (merged) segment: docids
// are re-resolved to the segment's current rowids (rows killed meanwhile are
// dropped from the update), then the regular update path is applied.
void RtSegment_t::UpdateAttributesOffline ( VecTraits_T<PostponedUpdate_t>& dPostUpdates ) NO_THREAD_SAFETY_ANALYSIS
{
	if ( dPostUpdates.IsEmpty() )
		return;

	CSphString sError; // NOTE(review): errors from Update_UpdateAttributes are collected but not reported here
	bool bCritical;
	assert ( GetStride() == m_tSchema.GetRowSize() );

	for ( auto & tPostUpdate : dPostUpdates )
	{
		AttrUpdateInc_t tUpdInc { std::move ( tPostUpdate.m_pUpdate ) };
		UpdateContext_t tCtx ( tUpdInc, m_tSchema );
		tCtx.PrepareListOfUpdatedAttributes ( sError );

		// actualize list of updates in context of new segment
		const auto & dDocids = tUpdInc.m_pUpdate->m_dDocids;
		ARRAY_FOREACH ( i, tPostUpdate.m_dRowsToUpdate )
		{
			auto& tRow = tPostUpdate.m_dRowsToUpdate[i];
			auto tRowID = GetAliveRowidByDocid ( dDocids[tRow.m_iIdx] );
			if ( tRowID==INVALID_ROWID )
				tPostUpdate.m_dRowsToUpdate.RemoveFast ( i-- ); // doc no longer alive here
			else
				tRow.m_tRow = tRowID;
		}

		tCtx.m_pAttrPool = m_dRows.begin();
		tCtx.m_pBlobPool = m_dBlobs.begin();
		Update_UpdateAttributes ( tPostUpdate.m_dRowsToUpdate, tCtx, bCritical, sError );
	}
}
// Compacts a sorted hit list in place, dropping exact duplicates
// (same row, word, and position) and collapsing repeated field-end "tail"
// hits within the same field. Assumes dHits is already sorted so that
// duplicates are adjacent. The list is resized down to the surviving hits.
static void CleanupHitDuplicates ( CSphTightVector<CSphWordHit> & dHits )
{
	TRACE_CONN ( "conn", "CleanupHitDuplicates" );
	if ( dHits.GetLength()<2 )
		return;

	int iSrc = 1, iDst = 1;
	while ( iSrc<dHits.GetLength() )
	{
		// compare the incoming hit against the last KEPT hit (iDst-1)
		CSphWordHit & tDst = dHits[iDst-1];
		const CSphWordHit & tSrc = dHits[iSrc];
		DWORD uDstPos = HITMAN::GetPosWithField ( tDst.m_uWordPos );
		DWORD uSrcPos = HITMAN::GetPosWithField ( tSrc.m_uWordPos );
		DWORD uDstField = HITMAN::GetField ( tDst.m_uWordPos );
		DWORD uSrcField = HITMAN::GetField ( tSrc.m_uWordPos );
		bool bDstEnd = HITMAN::IsEnd ( tDst.m_uWordPos );

		// check for pure duplicate and multiple tail hits
		if ( tDst.m_tRowID==tSrc.m_tRowID && tDst.m_uWordID==tSrc.m_uWordID && ( uDstPos==uSrcPos || ( uDstField==uSrcField && bDstEnd ) ) )
		{
			if ( uDstPos==uSrcPos )
			{
				// pure duplicate: iDst is NOT advanced, so this slot gets overwritten later
				dHits[iDst] = dHits[iSrc];
			} else if ( bDstEnd )
			{
				// repeated tail hit in the same field: clear the end flag on the
				// kept hit and keep the incoming one as the new field-end candidate
				tDst.m_uWordPos = HITMAN::CreateSum ( tDst.m_uWordPos, 0 ); // reset field end flag
				dHits[iDst] = dHits[iSrc];
				iDst++;
			}
			iSrc++;
		} else
		{
			// distinct hit: keep it
			dHits[iDst++] = dHits[iSrc++];
		}
	}
	dHits.Resize ( iDst );
}
// Commits the current transaction: deduplicates and sorts the accumulated
// docs/hits, builds a new RAM segment, merges the kill list, and hands both
// to CommitReplayable. On success optionally reports the number of killed
// docs via pDeleted and cleans up the accumulator. Empty transactions are
// silently accepted. Fails if the table was ALTERed mid-transaction.
bool RtIndex_c::Commit ( int * pDeleted, RtAccum_t * pAcc, CSphString * pError )
{
	TRACE_CONN ( "conn", "RtIndex_c::Commit" );

	assert ( g_bRTChangesAllowed );
	MEMORY ( MEM_INDEX_RT );

	if ( !BindAccum ( pAcc ) )
		return true;

	// empty txn, just ignore
	if ( !pAcc->m_uAccumDocs && pAcc->m_dAccumKlist.IsEmpty() )
	{
		pAcc->Cleanup();
		return true;
	}

	// schema changed under us (ALTER) - the accumulated data no longer matches
	if ( pAcc->GetIndexGeneration()!=m_iAlterGeneration )
	{
		if ( pError )
			pError->SetSprintf( "Can't commit to table '%s', table was altered during txn!", GetName() );
		return false;
	}

	// phase 0, build a new segment
	// accum and segment are thread local; so no locking needed yet
	// segment might be NULL if we're only killing rows this txn
	pAcc->CleanupDuplicates ( m_tSchema.GetRowSize() );
	pAcc->Sort();
	CleanupHitDuplicates ( pAcc->m_dAccum );

	CSphString sCreateError;
	RtSegmentRefPtf_t pNewSeg { CreateSegment ( pAcc, m_iWordsCheckpoint, m_tSettings.m_eHitless, m_dHitlessWords, sCreateError ) };
	if ( !pNewSeg && !sCreateError.IsEmpty() )
	{
		if ( pError )
			*pError = sCreateError;
		return false;
	}

	assert ( !pNewSeg || pNewSeg->m_uRows>0 );
	assert ( !pNewSeg || pNewSeg->m_tAliveRows>0 );

	BuildSegmentInfixes ( pNewSeg, m_pDict->HasMorphology(), m_bKeywordDict, m_tSettings.m_iMinInfixLen, m_iWordsCheckpoint, ( m_iMaxCodepointLength>1 ), m_tSettings.m_eHitless );

	// clean up parts we no longer need
	pAcc->CleanupPart();

	// sort accum klist, too
	pAcc->m_dAccumKlist.Uniq ();

	// now on to the stuff that needs locking and recovery
	int iKilled = 0;
	CSphString sError;
	if ( !CommitReplayable ( pNewSeg, pAcc->m_dAccumKlist, pAcc->m_iAccumBytes, iKilled, sError ) )
	{
		if ( pError )
			*pError = sError;
		return false;
	}

	if ( pDeleted )
		*pDeleted = iKilled;

	// done; cleanup accum
	pAcc->Cleanup();

	// reset accumulated warnings
	CSphString sWarning;
	pAcc->GrabLastWarning ( sWarning );

	return true;
}
// Wrap a freshly built segment into a const ref-ptr and hook it up to this
// index's RAM accounting counter. Safe to call with a null segment.
ConstRtSegmentRefPtf_t RtIndex_c::AdoptSegment ( RtSegment_t * pNewSeg )
{
	ConstRtSegmentRefPtf_t tAdopted { pNewSeg };
	if ( !pNewSeg )
		return tAdopted;

	pNewSeg->AddRef();
	pNewSeg->m_pRAMCounter = &m_iRamChunksAllocatedRAM;
	pNewSeg->UpdateUsedRam();
	return tAdopted;
}
// CommitReplayable -> ApplyKillList
// AttachDiskIndex -> ApplyKillList
// Mark the given docids as killed in all disk chunks and RAM segments.
// Returns the number of rows actually killed. Must run in the serial chunk fiber.
int RtIndex_c::ApplyKillList ( const VecTraits_T<DocID_t> & dAccKlist )
{
	if ( dAccKlist.IsEmpty() )
		return 0;

	assert ( Coro::CurrentScheduler() == m_tWorkers.SerialChunkAccess() );
	TRACE_SCHED ( "rt", "ApplyKillList" );

	int iKilled = 0;
	auto pChunks = m_tRtChunks.DiskChunks();
	if ( !Threads::IsInsideCoroutine() || m_tSaving.ActiveStateIs ( SaveState_c::ENABLED ) )
		for ( auto& pChunk : *pChunks )
			iKilled += pChunk->CastIdx().KillMulti ( dAccKlist );
	else
	{
		// if saving is disabled, and we NEED to actually mark a doc in disk chunk as deleted,
		// we'll pause that action, waiting until index is unlocked.
		bool bNeedWait = true;
		bool bEnabled = false;
		// the lambda waits (once) until saving is re-enabled, then reports whether kills may proceed
		for ( auto& pChunk : *pChunks )
			iKilled += pChunk->CastIdx().CheckThenKillMulti ( dAccKlist, [this,&bNeedWait, &bEnabled]()
			{
				if ( bNeedWait )
				{
					bNeedWait = false;
					bEnabled = m_tSaving.WaitEnabledOrShutdown();
				}
				return bEnabled;
			});
	}

	// RAM segments are killed unconditionally (no disk state involved)
	auto pSegs = m_tRtChunks.RamSegs();
	for ( auto& pSeg : *pSegs )
		iKilled += const_cast<RtSegment_t*> ( pSeg.Ptr() )->KillMulti ( dAccKlist );

	return iKilled;
}
// Return indices of the two segments with the smallest merge factors:
// .first is the smallest, .second is the runner-up.
inline std::pair<int,int> Find2Minimums ( const VecTraits_T<ConstRtSegmentRefPtf_t>& dSegments )
{
	assert ( dSegments.GetLength() > 1 );
	auto fnLess = [&dSegments] ( int a, int b ) { return dSegments[a]->GetMergeFactor() < dSegments[b]->GetMergeFactor(); };

	// seed with the first two entries, then scan the rest
	int iMin1 = fnLess ( 0, 1 ) ? 0 : 1;
	int iMin2 = 1 - iMin1;
	for ( int i = 2; i < dSegments.GetLength(); ++i )
	{
		if ( fnLess ( i, iMin1 ) )
		{
			iMin2 = iMin1;
			iMin1 = i;
		}
		else if ( fnLess ( i, iMin2 ) )
			iMin2 = i;
	}
	return { iMin1, iMin2 };
}
// Verdict of the segment-merge planner: merge a pair of segments, do nothing,
// or flush RAM to a disk chunk (FLUSH_EM = emergency flush on hard RAM limit).
enum class CheckMerge_e { MERGE, NOMERGE, FLUSH, FLUSH_EM };
// Decide whether a pair of segments can be merged within the given RAM budget.
// Estimates the merged sizes of each internal vector (scaled by the alive/total
// row ratio, since dead rows are dropped on merge) and returns:
// NOMERGE if the estimated total exceeds iRamLeft, FLUSH if any single vector
// would exceed MAX_SEGMENT_VECTOR_LEN, MERGE otherwise.
inline CheckMerge_e CheckSegmentsPair ( std::pair<const RtSegment_t*, const RtSegment_t*> tPair, int64_t iRamLeft=INT64_MAX ) NO_THREAD_SAFETY_ANALYSIS
{
	const auto* pA = tPair.first;
	const auto* pB = tPair.second;
	int64_t iAlive = pA->m_tAliveRows.load ( std::memory_order_relaxed ) + pB->m_tAliveRows.load ( std::memory_order_relaxed );
	DWORD uRows = pA->m_uRows + pB->m_uRows;

	int64_t iEstimatedMergedSize=0;
	int64_t iMaxFutureVecLen=0;

	// check whether we have enough RAM
	// ESTIMATE: projected length of one merged vector; Relimit applies the container growth policy
#define ESTIMATE( _v ) pA->_v.Relimit ( 0, ( (int64_t)pA->_v.GetLength() + pB->_v.GetLength() ) * iAlive / uRows )
#define LOC_ESTIMATE( _v ) do { auto _t=ESTIMATE(_v); iEstimatedMergedSize+=_t; if (iMaxFutureVecLen<_t) iMaxFutureVecLen=_t; } while (0)

	LOC_ESTIMATE ( m_dWords );
	LOC_ESTIMATE ( m_dDocs );
	LOC_ESTIMATE ( m_dHits );
	LOC_ESTIMATE ( m_dBlobs );
	LOC_ESTIMATE ( m_dKeywordCheckpoints );
	LOC_ESTIMATE ( m_dRows );

#undef LOC_ESTIMATE
#undef ESTIMATE

	if ( iEstimatedMergedSize > iRamLeft )
		return CheckMerge_e::NOMERGE;

	if ( iMaxFutureVecLen > MAX_SEGMENT_VECTOR_LEN )
		return CheckMerge_e::FLUSH;

	return CheckMerge_e::MERGE;
}
// Segment-merge policy. Picks the two smallest segments (returned via tSmallest)
// and decides whether to merge them, do nothing, or flush RAM to disk.
// iHardRamLeft/iSoftRamLeft are the remaining budgets against the hard (rt_mem_limit)
// and soft RAM limits; bNewAdded tells whether a new segment was just committed.
inline CheckMerge_e CheckWeCanMerge ( std::pair<int, int>& tSmallest, const VecTraits_T<ConstRtSegmentRefPtf_t>& dSegments, int64_t iHardRamLeft, int64_t iSoftRamLeft, bool bNewAdded ) NO_THREAD_SAFETY_ANALYSIS
{
	const int iSegs = dSegments.GetLength ();

	RTLOGV << "CheckWeCanMerge(" << dSegments.GetLength() << " segs, ram soft limit " << iSoftRamLeft << " bytes, ram hard limit " << iHardRamLeft << " bytes)";

	// without a fresh segment only act when the segment count is already at the cap
	if ( !bNewAdded && iSegs < MAX_SEGMENTS )
		return CheckMerge_e::NOMERGE;

	auto eFLUSH = CheckMerge_e::FLUSH;

	if ( iHardRamLeft<iSoftRamLeft )
	{
		iSoftRamLeft = iHardRamLeft;
		eFLUSH = CheckMerge_e::FLUSH_EM; // emergency flush. I.e. hard limit reached
	}

	// skip merging if no memory left
	if ( iSoftRamLeft <= 0 )
		return eFLUSH;

	// if N of segments is not so big - no merge need
	if ( iSegs < ( MAX_SEGMENTS - MAX_PROGRESSION_SEGMENT ) )
		return CheckMerge_e::NOMERGE;

	// take 2 smallest segments
	assert ( iSegs > 1 );
	tSmallest = Find2Minimums ( dSegments );
	ConstRtSegmentRefPtf_t & pA = dSegments[tSmallest.first];
	ConstRtSegmentRefPtf_t & pB = dSegments[tSmallest.second];

	// exit if progression is kept AND lesser MAX_SEGMENTS limit
	if ( pB->GetMergeFactor() > pA->GetMergeFactor() * 2 && iSegs<MAX_SEGMENTS )
		return CheckMerge_e::NOMERGE;

	auto eDecision = CheckSegmentsPair ( {pA, pB}, iSoftRamLeft );
	switch ( eDecision )
	{
	case CheckMerge_e::NOMERGE:
		// can't merge within budget: flush if we're at the segment cap, otherwise wait
		return ( iSegs >= MAX_SEGMENTS ) ? eFLUSH : CheckMerge_e::NOMERGE;
	case CheckMerge_e::FLUSH:
		return eFLUSH;
	case CheckMerge_e::MERGE:
	default:
		return CheckMerge_e::MERGE;
	}
}
// Pretty-print a CheckMerge_e verdict into a string builder (for verbose logging).
static StringBuilder_c & operator<< ( StringBuilder_c & dOut, CheckMerge_e eVal )
{
	const char * szName = nullptr;
	switch ( eVal )
	{
	case CheckMerge_e::MERGE:		szName = "MERGE"; break;
	case CheckMerge_e::NOMERGE:		szName = "NOMERGE"; break;
	case CheckMerge_e::FLUSH:		szName = "FLUSH"; break;
	case CheckMerge_e::FLUSH_EM:	szName = "FLUSH_EM"; break;
	default: break;
	}

	if ( szName )
		return dOut << szName;

	dOut.Sprintf ( "UNKNWN(%d)", (int)eVal );
	return dOut;
}
// Queue a merge request for the background segment-merge worker and (optionally)
// wake it. A pending EXIT request is never overwritten, and suppresses the notify
// so a shutting-down worker is not re-awakened.
void RtIndex_c::StartMergeSegments ( MergeSeg_e eMergeWhat, bool bNotify ) REQUIRES ( m_tWorkers.SerialChunkAccess() )
{
	TRACE_SCHED ( "rt", "StartMergeSegments" );
	m_eSegMergeQueued.ModifyValue ( [&eMergeWhat, &bNotify] ( MergeSeg_e& ePrevVal )
	{
		if ( ePrevVal == MergeSeg_e::EXIT )
			bNotify = false;
		else
			ePrevVal = eMergeWhat;
	});

	if ( bNotify )
		m_eSegMergeQueued.NotifyOne();
}
// Ask the segment-merge worker to exit and block until it has actually stopped
// (m_bSegMergeWorking drops to false, see RunMergeSegmentsWorker).
void RtIndex_c::StopMergeSegmentsWorker() REQUIRES ( m_tWorkers.SerialChunkAccess() )
{
	TRACE_SCHED ( "rt", "StopMergeSegmentsWorker" );
	m_eSegMergeQueued.SetValueAndNotifyOne ( MergeSeg_e::EXIT );
	m_bSegMergeWorking.Wait ( [] ( bool bVal ) { return !bVal; } );
}
// One iteration of the background segment-merge worker: inspect RAM usage,
// either schedule a disk-chunk save (on flush verdicts), merge the two smallest
// unlocked segments, and/or drop fully-killed segments from the RAM set.
// Returns true if another step should be chained, false to go idle.
bool RtIndex_c::MergeSegmentsStep ( MergeSeg_e eVal ) REQUIRES ( m_tWorkers.SerialChunkAccess() )
{
	TRACE_SCHED ( "rt", "MergeSegmentsStep" );
	// collect all RAM segments not occupied by any op (only ops we know is 'merge segments' and 'save ram chunk')
	int64_t iHardRamLeft { m_iRtMemLimit };
	int64_t iSoftRamLeft { m_iSoftRamLimit };
	LazyVector_T<ConstRtSegmentRefPtf_t> dSegments;
	for ( const auto& dSeg : *m_tRtChunks.RamSegs() )
	{
		if ( !dSeg->m_iLocked )
		{
			dSegments.Add ( dSeg );
			iSoftRamLeft -= dSeg->GetUsedRam();
		}
		// locked segments still count against the hard limit
		iHardRamLeft -= dSeg->GetUsedRam();
	}

	TRACE_COUNTER ( "mem", perfetto::CounterTrack ( "SoftRamLeft", "bytes" ).set_unit_multiplier ( 1024 ), iSoftRamLeft >> 10 );
	TRACE_COUNTER ( "mem", perfetto::CounterTrack ( "HardRamLeft", "bytes" ).set_unit_multiplier ( 1024 ), iHardRamLeft >> 10 );

	RTLOGV << "Totally we have " << m_tRtChunks.GetRamSegmentsCount() << " segments onboard.";

	std::pair<int, int> tSmallest;
	auto eMergeAction = CheckWeCanMerge ( tSmallest, dSegments, iHardRamLeft, iSoftRamLeft, eVal == MergeSeg_e::NEWSEG );

	RTLOGV << "CheckWeCanMerge returned " << eMergeAction;

	if ( eMergeAction == CheckMerge_e::FLUSH || eMergeAction == CheckMerge_e::FLUSH_EM )
	{
		// here it might be no race, as we're in serial worker.
		TRACE_SCHED ( "wait", "MergeSegmentsStep-wait-save" );
		auto iOldGen = m_iSaveGeneration;
		m_tNSavesNow.Wait ( [] ( int iVal ) { return iVal < SIMULTANEOUS_SAVE_LIMIT; } );

		// if a save finished during wait - limits and conditions may be changed, will restart to check whether save is still necessary
		if ( m_iSaveGeneration != iOldGen )
		{
			RTLOGV << "Recheck due to just finished SaveDiskChunk";
			return true;
		}

		Coro::Go ( [this, eMergeAction]() REQUIRES ( m_tWorkers.SerialChunkAccess() ) {
			SaveDiskChunk ( false, eMergeAction == CheckMerge_e::FLUSH_EM );
		}, m_tWorkers.SerialChunkAccess() );
		return false; // exit into idle
	}

	int iMergeOp = 0;
	RtSegmentRefPtf_t pMerged { nullptr };

	if ( eMergeAction == CheckMerge_e::MERGE )
	{
		assert ( dSegments.GetLength() >= 2 );
		ConstRtSegmentRefPtf_t pA = dSegments[tSmallest.first];
		ConstRtSegmentRefPtf_t pB = dSegments[tSmallest.second];
		iMergeOp = m_tWorkers.GetNextOpTicket();
		pA->m_iLocked = pB->m_iLocked = iMergeOp; // mark them as retiring.
		pMerged = MergeTwoSegments ( pA, pB );
		if ( pMerged && pMerged->m_tAliveRows.load ( std::memory_order_relaxed ) )
		{
			// some updates might be applied to pA and pB during the merge. Now it is time to apply them also
			// to the merged segment.
			LazyVector_T<ConstRtSegmentRefPtf_t> dOld;
			dOld.Add ( pA );
			dOld.Add ( pB );
			auto dUpdates = GatherUpdates::FromChunksOrSegments ( dOld );
			pMerged->UpdateAttributesOffline ( dUpdates );
		}
	}

	// merged might be killed during merge op
	// as we run in serial fiber, there is no concurrency, and so, killing hooks of retired sources will not fire.
	assert ( Coro::CurrentScheduler() == m_tWorkers.SerialChunkAccess() );
	if ( pMerged && !pMerged->m_tAliveRows.load ( std::memory_order_relaxed ) )
		pMerged = nullptr;

	// we collect after merge, as some data might be changed during the merge
	// fully-killed segments are tagged with the same op ticket so they get dropped below
	for ( auto& pSeg : dSegments )
		if ( !pSeg->m_tAliveRows.load ( std::memory_order_relaxed ) )
		{
			if ( !iMergeOp )
				iMergeOp = m_tWorkers.GetNextOpTicket();
			pSeg->m_iLocked = iMergeOp;
		}
	dSegments.Reset();

	// nothing merged, and also nothing killed - nothing to do, exit into idle
	if ( !pMerged && !iMergeOp )
		return false;

	// publish the new RAM segment set: everything not tagged by iMergeOp, plus the merge result
	auto tNewSet = RtWriter();
	tNewSet.InitRamSegs ( RtWriter_c::empty );
	for ( const auto& pSeg : *m_tRtChunks.RamSegs() )
		if ( pSeg->m_iLocked != iMergeOp )
			tNewSet.m_pNewRamSegs->Add ( pSeg );

	if ( pMerged )
		tNewSet.m_pNewRamSegs->Add ( AdoptSegment ( pMerged ) );

	RTRLOG << "after merge " << tNewSet.m_pNewRamSegs->GetLength() << " segments on-board";
	TRACE_COUNTER ( "rt", perfetto::CounterTrack ( "Segments" ), tNewSet.m_pNewRamSegs->GetLength() );

	// chain next step
	return true;
}
// Spawn the background segment-merge worker coroutine on the serial chunk fiber.
// The worker loops: wait for a queued request, consume it, run MergeSegmentsStep,
// and re-queue itself while the step reports more work; EXIT terminates the loop.
void RtIndex_c::RunMergeSegmentsWorker()
{
	Coro::Go ( [this]() REQUIRES ( m_tWorkers.SerialChunkAccess() )
	{
		TRACE_SCHED ( "rt", "RunMergeSegmentsWorker-lambda" );
		m_bSegMergeWorking.SetValueAndNotifyOne ( true );
		// signal "stopped" on any exit path, so StopMergeSegmentsWorker() can proceed
		auto tResetSegMergeWorking = AtScopeExit ( [this] { m_bSegMergeWorking.SetValueAndNotifyOne ( false ); } );
		while (true)
		{
			m_eSegMergeQueued.Wait ( [] ( MergeSeg_e eVal ) { return eVal != MergeSeg_e::NONE; } );
			auto eVal = m_eSegMergeQueued.ExchangeValue ( MergeSeg_e::NONE );
			assert ( eVal != MergeSeg_e::NONE );

			if ( eVal==MergeSeg_e::EXIT )
				return;

			if ( MergeSegmentsStep ( eVal ) )
				StartMergeSegments ( MergeSeg_e::NEWSEG, false );
		}
	}, m_tWorkers.SerialChunkAccess() );
}
namespace {
// Monotonically increasing id used to tag commit traces; thread-safe.
int CommitID()
{
	static std::atomic<int> tCounter { 0 };
	const int iId = tCounter.fetch_add ( 1, std::memory_order_relaxed );
	return iId;
}
} // namespace
// The replayable (binlogged) part of a commit: binlog the txn, apply the kill-list,
// publish the new RAM segment (if any), update stats and kick the merge worker.
// Runs under the serial chunk fiber; iTotalKilled receives the number of killed rows.
bool RtIndex_c::CommitReplayable ( RtSegment_t * pNewSeg, const VecTraits_T<DocID_t> & dAccKlist, int64_t iAddTotalBytes, int & iTotalKilled, CSphString & sError ) REQUIRES_SHARED ( pNewSeg->m_tLock )
{
	// store statistics, because pNewSeg just might get merged
	const int iId = CommitID();
	MAYBE_UNUSED ( iId );
	TRACE_VARID ( "rt", "CommitReplayable", iId );
	int iNewDocs = pNewSeg ? (int)pNewSeg->m_uRows : 0;

	// helpers for SPH_ATTR_TOKENCOUNT attributes
	CSphVector<int64_t> dLens;
	int iFirstFieldLenAttr = m_tSchema.GetAttrId_FirstFieldLen();
	if ( pNewSeg && iFirstFieldLenAttr>=0 )
	{
		assert ( pNewSeg->GetStride()==m_iStride );
		int iFields = m_tSchema.GetFieldsCount(); // shortcut
		dLens.Resize ( iFields );
		dLens.Fill ( 0 );
		// sum per-field token counts over all rows of the new segment
		for ( DWORD i=0; i<pNewSeg->m_uRows; ++i )
			for ( int j=0; j<iFields; ++j )
				dLens[j] += sphGetRowAttr ( pNewSeg->GetDocinfoByRowID(i), m_tSchema.GetAttr ( j+iFirstFieldLenAttr ).m_tLocator );
	}

	if ( pNewSeg && !CheckSegmentConsistency ( pNewSeg, false ) )
		DumpInsert ( pNewSeg );

	// We're going to modify segments, so fall into serial fiber. From here no concurrent changes may happen
	ScopedScheduler_c tSerialFiber { m_tWorkers.SerialChunkAccess() };

	// for pure kills it is not necessary to wait, as it can't increase N of segments.
	if ( pNewSeg )
	{
		TRACE_VARID ( "rt", "wait_segments", iId );
		m_tUnLockedSegments.Wait ( [] ( int iVals ) { return iVals < MAX_SEGMENTS; } );
	}
	TRACE_VARID ( "rt", "CommitReplayable.serial", iId );

	RTLOGV << "CommitReplayable";

	// first of all, binlog txn data for recovery
	if ( !BinlogCommit ( pNewSeg, dAccKlist, iAddTotalBytes, sError ) )
		return false;

	// 1. Apply kill-list to existing chunks/segments
	iTotalKilled = ApplyKillList ( dAccKlist );

	// 2. Add new RAM-segment (if any). As we 1-st kill, then add - whole change is *not* atomic, ACID is broken here.
	if ( pNewSeg )
	{
		auto tNewState = RtWriter();
		tNewState.InitRamSegs ( RtWriter_c::copy );
		tNewState.m_pNewRamSegs->Add ( AdoptSegment ( pNewSeg ) );
	}

	// update stats
	m_tStats.m_iTotalDocuments += iNewDocs - iTotalKilled;
	m_tStats.m_iTotalBytes += iAddTotalBytes;

	if ( dLens.GetLength() )
		for ( int i = 0; i < m_tSchema.GetFieldsCount(); ++i )
		{
			m_dFieldLensRam[i] += dLens[i];
			m_dFieldLens[i] = m_dFieldLensRam[i] + m_dFieldLensDisk[i];
		}

	// backoff segments merging and m.b. saving disk chunk (that is not our deal, other worker will do it).
	StartMergeSegments ( pNewSeg ? MergeSeg_e::NEWSEG : MergeSeg_e::KILLED );
	return true;
}
// Discard an accumulated (uncommitted) transaction, if any is bound to this index.
void RtIndex_c::RollBack ( RtAccum_t * pAcc )
{
	assert ( g_bRTChangesAllowed );

	if ( !BindAccum ( pAcc ) )
		return;

	pAcc->Cleanup();
}
// Queue the given docids for deletion in the current transaction's kill-list.
// The actual kill happens on Commit(). Returns false (with sError set) only
// when the accumulator cannot be bound to this index.
bool RtIndex_c::DeleteDocument ( const VecTraits_T<DocID_t> & dDocs, CSphString & sError, RtAccum_t * pAcc )
{
	assert ( g_bRTChangesAllowed );
	MEMORY ( MEM_RT_ACCUM );

	if ( !BindAccum ( pAcc, &sError ) )
		return false;

	// !COMMIT should handle case when uDoc what inserted in current txn here
	pAcc->m_dAccumKlist.Append ( dDocs );
	return true;
}
//////////////////////////////////////////////////////////////////////////
// LOAD/SAVE
//////////////////////////////////////////////////////////////////////////
// Dictionary checkpoint collected while saving a disk chunk: m_uWord holds either
// the wordid (crc dict) or an offset into the keyword-checkpoint blob (keyword
// dict), m_uOffset is the position in the dictionary file (see WriteCheckpoints).
struct Checkpoint_t
{
	uint64_t m_uWord;
	uint64_t m_uOffset;
};
// Flush the current RAM chunk into a disk chunk unconditionally (forced save),
// serialized via the chunk-access fiber.
bool RtIndex_c::ForceDiskChunk()
{
	MEMORY ( MEM_INDEX_RT );

	ScopedScheduler_c tSerialFiber ( m_tWorkers.SerialChunkAccess() );
	return SaveDiskChunk ( true );
}
// Shared state of a single SaveDiskData() run: target file naming, dictionary
// checkpoints, and per-segment rowid remaps (old rowid -> rowid in the new disk
// chunk, INVALID_ROWID for dead rows). Inherits BuildHeader_t totals which are
// filled along the way and finally serialized by SaveDiskHeader().
struct SaveDiskDataContext_t : public BuildHeader_t
{
	SphOffset_t m_tDocsOffset {0};
	SphOffset_t m_tLastDocPos {0};
	std::unique_ptr<ISphInfixBuilder> m_pInfixer;
	CSphVector<Checkpoint_t> m_dCheckpoints;
	CSphVector<BYTE> m_dKeywordCheckpoints;
	CSphVector<CSphVector<RowID_t>> m_dRowMaps;
	IndexFileBase_c m_tFilebase;
	const ConstRtSegmentSlice_t& m_tRamSegments;

	SaveDiskDataContext_t ( const char * szFilename, const ConstRtSegmentSlice_t& tSegs )
		: m_tFilebase ( szFilename )
		, m_tRamSegments ( tSegs )
	{
		m_dRowMaps.Reserve ( m_tRamSegments.GetLength() );
		for ( const auto & pSeg : m_tRamSegments )
		{
			auto& dRowMap = m_dRowMaps.Add();
			dRowMap.Reserve ( pSeg->m_uRows );
			dRowMap.Resize ( pSeg->m_uRows ); // need space for ALL rows, incl. dead (they will be INVALID_ROWID)
			dRowMap.Fill ( INVALID_ROWID );
		}
		assert ( m_dRowMaps.GetLength() == m_tRamSegments.GetLength() );
	}
};
// Save all attribute data of the RAM segments into the new disk chunk files:
// rows (.spa, incl. min/max index), blobs (.spb), docid lookup (.spt),
// histograms (.sphi), docstore (.spds), columnar storage (.spc), secondary
// indexes (.spidx/.spjidx) and KNN data (.spknn). Dead rows are skipped and
// tCtx.m_dRowMaps receives the old->new rowid mapping used later by WriteDocs().
bool RtIndex_c::WriteAttributes ( SaveDiskDataContext_t & tCtx, CSphString & sError ) const
{
	auto sSPA = tCtx.m_tFilebase.GetFilename ( SPH_EXT_SPA );
	auto sSPB = tCtx.m_tFilebase.GetFilename ( SPH_EXT_SPB );
	auto sSPT = tCtx.m_tFilebase.GetFilename ( SPH_EXT_SPT );
	auto sSPHI = tCtx.m_tFilebase.GetFilename ( SPH_EXT_SPHI );
	auto sSPDS = tCtx.m_tFilebase.GetFilename ( SPH_EXT_SPDS );
	auto sSPC = tCtx.m_tFilebase.GetFilename ( SPH_EXT_SPC );
	auto sSIdx = tCtx.m_tFilebase.GetFilename ( SPH_EXT_SPIDX );
	auto sJsonSIdx = tCtx.m_tFilebase.GetFilename ( SPH_EXT_SPJIDX );
	auto sSKNN = tCtx.m_tFilebase.GetFilename ( SPH_EXT_SPKNN );

	CSphWriter tWriterSPA;
	if ( !tWriterSPA.OpenFile ( sSPA, sError ) )
		return false;

	const CSphColumnInfo * pBlobLocatorAttr = m_tSchema.GetAttr ( sphGetBlobLocatorName() );

	AttrIndexBuilder_c tMinMaxBuilder(m_tSchema);

	BuildBufferSettings_t tSettings; // use default buffer settings

	// optional per-format builders; each is created only if the schema needs it
	std::unique_ptr<BlobRowBuilder_i> pBlobRowBuilder;
	if ( pBlobLocatorAttr )
	{
		pBlobRowBuilder = sphCreateBlobRowBuilder ( m_tSchema, sSPB, m_tSettings.m_tBlobUpdateSpace, tSettings.m_iBufferAttributes, sError );
		if ( !pBlobRowBuilder )
			return false;
	}

	std::unique_ptr<DocstoreBuilder_i> pDocstoreBuilder;
	if ( m_tSchema.HasStoredFields() || m_tSchema.HasStoredAttrs() )
	{
		pDocstoreBuilder = CreateDocstoreBuilder ( sSPDS, m_tSettings, tSettings.m_iBufferStorage, sError );
		if ( !pDocstoreBuilder )
			return false;

		SetupDocstoreFields ( *pDocstoreBuilder, m_tSchema );
	}

	std::unique_ptr<columnar::Builder_i> pColumnarBuilder;
	if ( m_tSchema.HasColumnarAttrs() )
	{
		pColumnarBuilder = CreateColumnarBuilder ( m_tSchema, sSPC, tSettings.m_iBufferColumnar, sError );
		if ( !pColumnarBuilder )
			return false;
	}

	HistogramContainer_c tHistograms;
	CSphVector<PlainOrColumnar_t> dAttrsForHistogram;
	BuildCreateHistograms ( tHistograms, dAttrsForHistogram, m_tSchema );

	CSphVector<PlainOrColumnar_t> dSiAttrs;
	std::unique_ptr<SI::Builder_i> pSIdxBuilder;
	std::unique_ptr<JsonSIBuilder_i> pJsonSIBuilder;
	if ( IsSecondaryLibLoaded() )
	{
		pSIdxBuilder = CreateIndexBuilder ( m_iRtMemLimit, m_tSchema, sSIdx, dSiAttrs, tSettings.m_iBufferStorage, sError );
		if ( !pSIdxBuilder )
			return false;

		if ( m_tSchema.HasJsonSIAttrs() )
		{
			pJsonSIBuilder = CreateJsonSIBuilder ( m_tSchema, sSPB, sJsonSIdx, sError );
			if ( !pJsonSIBuilder )
				return false;
		}
	}

	tCtx.m_iTotalDocuments = 0;
	for ( const auto & i : tCtx.m_tRamSegments )
		tCtx.m_iTotalDocuments += i->m_tAliveRows.load ( std::memory_order_relaxed );

	CSphVector<PlainOrColumnar_t> dAttrsForKNN;
	std::unique_ptr<knn::Builder_i> pKNNBuilder;
	if ( m_tSchema.HasKNNAttrs() )
	{
		pKNNBuilder = BuildCreateKNN ( m_tSchema, tCtx.m_iTotalDocuments, dAttrsForKNN, sError );
		if ( !pKNNBuilder )
			return false;
	}

	CSphFixedVector<DocidRowidPair_t> dRawLookup ( tCtx.m_iTotalDocuments );

	// docid may come either from the first columnar attr or from the plain row
	int iColumnarIdLoc = -1;
	if ( m_tSchema.GetAttr(0).IsColumnar() )
		iColumnarIdLoc = 0;

	CSphVector<int64_t> dTmp;

	RowID_t tNextRowID = 0;
	int iStride = m_tSchema.GetRowSize();
	auto iStrideBytes = sizeof ( CSphRowitem ) * iStride;
	CSphFixedVector<CSphRowitem> dNewRow { iStride };
	CSphRowitem * pNewRow = dNewRow.Begin();
	ARRAY_FOREACH ( i, tCtx.m_tRamSegments )
	{
		const auto & tSeg = *tCtx.m_tRamSegments[i];
		SccRL_t rLock ( tSeg.m_tLock );
		tSeg.m_bAttrsBusy.store ( true, std::memory_order_release );

		auto dColumnarIterators = CreateAllColumnarIterators ( tSeg.m_pColumnar.get(), m_tSchema );

		// iterate alive rows only; dead rows keep INVALID_ROWID in the row map
		for ( auto tRowID : RtLiveRows_c(tSeg) )
		{
			const CSphRowitem * pRow = tSeg.m_dRows.Begin() + (int64_t)tRowID*iStride;
			tMinMaxBuilder.Collect(pRow);
			if ( pBlobLocatorAttr )
			{
				// re-pack the blob row and patch the locator in a scratch copy of the row
				auto tSrcOffset = sphGetRowAttr ( pRow, pBlobLocatorAttr->m_tLocator );
				auto tTargetOffsetSize = pBlobRowBuilder->Flush ( tSeg.m_dBlobs.Begin() + tSrcOffset );
				memcpy ( pNewRow, pRow, iStrideBytes );
				sphSetRowAttr ( pNewRow, pBlobLocatorAttr->m_tLocator, tTargetOffsetSize.first );
				tWriterSPA.PutBytes ( pNewRow, (int64_t)iStrideBytes );

				if ( pJsonSIBuilder )
					pJsonSIBuilder->AddRowOffsetSize ( tTargetOffsetSize );
			}
			else
				tWriterSPA.PutBytes ( pRow, (int64_t)iStrideBytes );

			DocID_t tDocID;

			ARRAY_FOREACH ( iIterator, dColumnarIterators )
			{
				auto & tIterator = dColumnarIterators[iIterator];
				SphAttr_t tAttr = SetColumnarAttr ( iIterator, tIterator.second, pColumnarBuilder.get(), tIterator.first, tRowID, dTmp );
				if ( iIterator==iColumnarIdLoc )
					tDocID = tAttr;
			}

			if ( iColumnarIdLoc<0 )
				tDocID = sphGetDocID(pRow);

			BuildStoreHistograms ( tRowID, pRow, tSeg.m_dBlobs.Begin(), dColumnarIterators, dAttrsForHistogram, tHistograms );
			if ( pSIdxBuilder.get() )
			{
				pSIdxBuilder->SetRowID ( tNextRowID );
				BuildStoreSI ( tRowID, pRow, tSeg.m_dBlobs.Begin(), dColumnarIterators, dSiAttrs, pSIdxBuilder.get(), dTmp );
			}

			dRawLookup[tNextRowID] = { tDocID, tNextRowID };

			if ( pDocstoreBuilder )
			{
				assert ( tSeg.m_pDocstore );
				pDocstoreBuilder->AddDoc ( tNextRowID, tSeg.m_pDocstore->GetDoc ( tRowID, nullptr, -1, false ) );
			}

			if ( pKNNBuilder && !BuildStoreKNN ( tRowID, pRow, tSeg.m_dBlobs.Begin(), dColumnarIterators, dAttrsForKNN, *pKNNBuilder ) )
			{
				sError = pKNNBuilder->GetError().c_str();
				return false;
			}

			// record old rowid -> new rowid; WriteDocs() uses this to remap doclists
			tCtx.m_dRowMaps[i][tRowID] = tNextRowID++;
		}
	}

	// rows could be killed during index save and tNextRowID could be less than tCtx.m_iTotalDocuments \ initial count
	assert ( tNextRowID<=(RowID_t)dRawLookup.GetLength() );
	VecTraits_T<DocidRowidPair_t> dLookup ( dRawLookup.Begin(), tNextRowID );

	std::string sErrorSTL;
	if ( pColumnarBuilder && !pColumnarBuilder->Done(sErrorSTL) )
	{
		sError = sErrorSTL.c_str();
		return false;
	}

	if ( pBlobRowBuilder && !pBlobRowBuilder->Done ( sError ) )
		return false;

	if ( pDocstoreBuilder )
		pDocstoreBuilder->Finalize();

	if ( pKNNBuilder && !pKNNBuilder->Save ( sSKNN.cstr(), tSettings.m_iBufferStorage, sErrorSTL ) )
	{
		sError = sErrorSTL.c_str();
		return false;
	}

	dLookup.Sort ( CmpDocidLookup_fn() );

	if ( !WriteDocidLookup ( sSPT, dLookup, sError ) )
		return false;

	dRawLookup.Reset(0);

	if ( !tHistograms.Save ( sSPHI, sError ) )
		return false;

	tMinMaxBuilder.FinishCollect();
	tCtx.m_iDocinfo = tNextRowID;

	// append the collected min/max (block index) rows to the end of .spa
	if ( tCtx.m_iDocinfo && m_tSchema.HasNonColumnarAttrs() )
	{
		const CSphTightVector<CSphRowitem> & dMinMaxRows = tMinMaxBuilder.GetCollected();

		tCtx.m_iMinMaxIndex = tWriterSPA.GetPos () / sizeof ( CSphRowitem );
		tCtx.m_iDocinfoIndex = ( dMinMaxRows.GetLength() / m_tSchema.GetRowSize() / 2 ) - 1;
		tWriterSPA.PutBytes ( dMinMaxRows.Begin(), dMinMaxRows.GetLength()*sizeof(CSphRowitem) );
	}

	tWriterSPA.CloseFile();
	if ( tWriterSPA.IsError() )
		return false;

	std::string sSidxError;
	if ( pSIdxBuilder.get() && !pSIdxBuilder->Done ( sSidxError ) )
	{
		sError = sSidxError.c_str();
		return false;
	}

	if ( pJsonSIBuilder && !pJsonSIBuilder->Done(sError) )
		return false;

	return true;
}
// Merge the inverted indexes of all RAM segments into the disk chunk doclists
// (.spd), hitlists (.spp), skiplists (.spe) and the dictionary stream
// (tWriterDict). Walks all segment word readers in parallel, always emitting
// the smallest word next; rowids are remapped via tCtx.m_dRowMaps and rows
// killed during the save (INVALID_ROWID in the map) are dropped.
bool RtIndex_c::WriteDocs ( SaveDiskDataContext_t & tCtx, CSphWriter & tWriterDict, CSphString & sError ) const
{
	CSphWriter tWriterHits, tWriterDocs, tWriterSkips;

	if ( !tWriterHits.OpenFile ( tCtx.m_tFilebase.GetFilename ( SPH_EXT_SPP ), sError ) )
		return false;

	if ( !tWriterDocs.OpenFile ( tCtx.m_tFilebase.GetFilename ( SPH_EXT_SPD ), sError ) )
		return false;

	if ( !tWriterSkips.OpenFile ( tCtx.m_tFilebase.GetFilename ( SPH_EXT_SPE ), sError ) )
		return false;

	// each file starts with a single dummy byte, so offset 0 stays "invalid"
	tWriterHits.PutByte(1);
	tWriterDocs.PutByte(1);
	tWriterSkips.PutByte(1);

	int iSegments = tCtx.m_tRamSegments.GetLength();

	// one word reader per segment; dWords[i] is the current word of segment i
	RawVector_T<RtWordReader_c> dWordReaders;
	dWordReaders.Reserve_static ( iSegments );
	CSphVector<const RtWord_t*> dWords(iSegments);

	for ( int i = 0; i < iSegments; ++i )
	{
		dWordReaders.Emplace_back ( tCtx.m_tRamSegments[i], m_bKeywordDict, m_iWordsCheckpoint, m_tSettings.m_eHitless );
		dWords[i] = dWordReaders.Last().UnzipWord();
	}

	// loop keywords
	int iWords = 0;
	CSphKeywordDeltaWriter tLastWord;
	SphWordID_t uLastWordID = 0;
	CSphVector<SkiplistEntry_t> dSkiplist;
	tCtx.m_tLastDocPos = 0;

	bool bHasMorphology = m_pDict->HasMorphology();
	int iSkiplistBlockSize = m_tSettings.m_iSkiplistBlockSize;
	assert ( iSkiplistBlockSize>0 );

	while (true)
	{
		// find keyword with min id
		const RtWord_t * pWord = nullptr;
		for ( auto & i : dWords )
			if ( CompareWords ( i, pWord ) < 0 )
				pWord = i;

		if ( !pWord )
			break;

		SphOffset_t uDocpos = tWriterDocs.GetPos();
		SphOffset_t uLastHitpos = 0;
		RowID_t tLastRowID = INVALID_ROWID;
		RowID_t tSkiplistRowID = INVALID_ROWID;
		int iDocs = 0;
		int iHits = 0;
		dSkiplist.Resize(0);

		// loop all segments that have this keyword
		CSphBitvec tSegsWithWord ( iSegments );
		ARRAY_FOREACH ( iSegment, dWords )
		{
			if ( !CompareWords ( dWords[iSegment], pWord ) )
				tSegsWithWord.BitSet(iSegment);
			else
				continue;

			RtDocReader_c tDocReader ( tCtx.m_tRamSegments[iSegment], *dWords[iSegment] );
			while ( tDocReader.UnzipDoc() )
			{
				const auto* pDoc = (const RtDoc_t*)tDocReader;
				RowID_t tRowID = tCtx.m_dRowMaps[iSegment][pDoc->m_tRowID];
				if ( tRowID==INVALID_ROWID )
					continue;

				// build skiplist, aka save decoder state as needed
				if ( ( iDocs & ( iSkiplistBlockSize-1 ) )==0 )
				{
					SkiplistEntry_t & t = dSkiplist.Add();
					t.m_tBaseRowIDPlus1 = tSkiplistRowID+1;
					t.m_iOffset = tWriterDocs.GetPos();
					t.m_iBaseHitlistPos = uLastHitpos;
				}
				++iDocs;
				iHits += pDoc->m_uHits;
				tSkiplistRowID = tRowID;

				// rowids are delta-encoded within the doclist
				tWriterDocs.ZipOffset ( tRowID - std::exchange ( tLastRowID, tRowID ) );
				tWriterDocs.ZipInt ( pDoc->m_uHits );
				if ( pDoc->m_uHits==1 && pWord->m_bHasHitlist )
				{
					// single hit is inlined into the doclist (split into two varints)
					tWriterDocs.ZipInt ( pDoc->m_uHit & 0x7FFFFFUL );
					tWriterDocs.ZipInt ( pDoc->m_uHit >> 23 );
				} else
				{
					tWriterDocs.ZipInt ( pDoc->m_uDocFields );
					tWriterDocs.ZipOffset ( tWriterHits.GetPos() - std::exchange ( uLastHitpos, tWriterHits.GetPos() ) );
				}

				// loop hits from current segment
				if ( pDoc->m_uHits>1 )
				{
					DWORD uLastHit = 0;
					RtHitReader_c tInHits ( *tCtx.m_tRamSegments[iSegment], *pDoc );
					while ( DWORD uValue = tInHits.UnzipHit() )
						tWriterHits.ZipInt ( uValue - std::exchange ( uLastHit, uValue ) );
					tWriterHits.ZipInt(0);
				}
			}
		}

		// write skiplist
		int64_t iSkiplistOff = tWriterSkips.GetPos();
		for ( int i=1; i<dSkiplist.GetLength(); ++i )
		{
			const SkiplistEntry_t & tPrev = dSkiplist[i-1];
			const SkiplistEntry_t & tCur = dSkiplist[i];
			assert ( tCur.m_tBaseRowIDPlus1 - tPrev.m_tBaseRowIDPlus1>=(DWORD)iSkiplistBlockSize );
			assert ( tCur.m_iOffset - tPrev.m_iOffset>=4*iSkiplistBlockSize );
			tWriterSkips.ZipInt ( tCur.m_tBaseRowIDPlus1 - tPrev.m_tBaseRowIDPlus1 - iSkiplistBlockSize );
			tWriterSkips.ZipOffset ( tCur.m_iOffset - tPrev.m_iOffset - 4*iSkiplistBlockSize );
			tWriterSkips.ZipOffset ( tCur.m_iBaseHitlistPos - tPrev.m_iBaseHitlistPos );
		}

		// write dict entry if necessary
		// (all docs of this word might have been killed, then nothing was emitted)
		if ( tWriterDocs.GetPos()!=uDocpos )
		{
			tWriterDocs.ZipInt ( 0 ); // docs over

			if ( ( iWords%SPH_WORDLIST_CHECKPOINT )==0 )
			{
				if ( iWords )
				{
					SphOffset_t uOff = m_bKeywordDict ? 0 : uDocpos - tCtx.m_tLastDocPos;
					tWriterDict.ZipInt ( 0 );
					tWriterDict.ZipOffset ( uOff ); // store last hitlist length
				}

				// restart delta coding, once per SPH_WORDLIST_CHECKPOINT entries
				tCtx.m_tLastDocPos = 0;
				uLastWordID = 0;
				tLastWord.Reset();

				// begin new wordlist entry
				Checkpoint_t & tChk = tCtx.m_dCheckpoints.Add();
				tChk.m_uOffset = tWriterDict.GetPos();
				if ( m_bKeywordDict )
					tChk.m_uWord = sphPutBytes ( &tCtx.m_dKeywordCheckpoints, pWord->m_sWord, pWord->m_sWord[0]+1 ); // copy word len + word itself to checkpoint storage
				else
					tChk.m_uWord = pWord->m_uWordID;
			}
			++iWords;

			if ( m_bKeywordDict )
			{
				tLastWord.PutDelta ( tWriterDict, pWord->m_sWord+1, pWord->m_sWord[0] );
				tWriterDict.ZipOffset ( uDocpos );
			} else
			{
				assert ( pWord->m_uWordID!=uLastWordID );
				tWriterDict.ZipOffset ( pWord->m_uWordID - uLastWordID );
				uLastWordID = pWord->m_uWordID;
				assert ( uDocpos>tCtx.m_tLastDocPos );
				tWriterDict.ZipOffset ( uDocpos - tCtx.m_tLastDocPos );
			}

			DWORD iDocsCount = iDocs;
			if ( !pWord->m_bHasHitlist && m_tSettings.m_eHitless==SPH_HITLESS_SOME )
				iDocsCount |= HITLESS_DOC_FLAG;

			tWriterDict.ZipInt ( iDocsCount );
			tWriterDict.ZipInt ( iHits );

			if ( m_bKeywordDict )
			{
				BYTE uHint = sphDoclistHintPack ( iDocs, tWriterDocs.GetPos()-tCtx.m_tLastDocPos );
				if ( uHint )
					tWriterDict.PutByte ( uHint );

				// build infixes
				if ( tCtx.m_pInfixer )
					tCtx.m_pInfixer->AddWord ( pWord->m_sWord+1, pWord->m_sWord[0], tCtx.m_dCheckpoints.GetLength(), bHasMorphology );
			}

			// emit skiplist pointer
			if ( iDocs>iSkiplistBlockSize )
				tWriterDict.ZipOffset ( iSkiplistOff );

			tCtx.m_tLastDocPos = uDocpos;
		}

		// read next words (advance only the segments that carried the emitted word)
		for ( int i = 0; i < tSegsWithWord.GetSize(); ++i )
			if ( tSegsWithWord.BitGet(i) )
				dWords[i] = dWordReaders[i].UnzipWord();
	}

	tCtx.m_tDocsOffset = tWriterDocs.GetPos();

	tWriterHits.CloseFile();
	tWriterDocs.CloseFile();
	tWriterSkips.CloseFile();

	return true;
}
// Finalize the dictionary stream started by WriteDocs(): terminate the last
// checkpoint block, flush infix entries/blocks (if any) and append the
// checkpoint table plus a small trailing dict header.
void RtIndex_c::WriteCheckpoints ( SaveDiskDataContext_t & tCtx, CSphWriter & tWriterDict ) const
{
	// write checkpoints
	SphOffset_t uOff = m_bKeywordDict ? 0 : tCtx.m_tDocsOffset - tCtx.m_tLastDocPos;

	tWriterDict.ZipInt ( 0 ); // indicate checkpoint
	tWriterDict.ZipOffset ( uOff ); // store last doclist length

	// flush infix hash entries, if any
	if ( tCtx.m_pInfixer )
		tCtx.m_pInfixer->SaveEntries ( tWriterDict );

	tCtx.m_iDictCheckpointsOffset = tWriterDict.GetPos();
	if ( m_bKeywordDict )
	{
		// keyword dict: checkpoints reference length-prefixed words stored in m_dKeywordCheckpoints
		const char * pCheckpoints = (const char *)tCtx.m_dKeywordCheckpoints.Begin();
		for ( const auto & i : tCtx.m_dCheckpoints )
		{
			const char * pPacked = pCheckpoints + i.m_uWord;
			int iLen = *pPacked;
			assert ( iLen && (int)i.m_uWord+1+iLen<=tCtx.m_dKeywordCheckpoints.GetLength() );
			tWriterDict.PutDword ( iLen );
			tWriterDict.PutBytes ( pPacked+1, iLen );
			tWriterDict.PutOffset ( i.m_uOffset );
		}
	} else
	{
		// crc dict: checkpoints reference raw wordids
		for ( const auto & i : tCtx.m_dCheckpoints )
		{
			tWriterDict.PutOffset ( i.m_uWord );
			tWriterDict.PutOffset ( i.m_uOffset );
		}
	}

	// flush infix hash blocks
	if ( tCtx.m_pInfixer )
	{
		tCtx.m_iInfixBlocksOffset = tCtx.m_pInfixer->SaveEntryBlocks ( tWriterDict );
		tCtx.m_iInfixBlocksWordsSize = tCtx.m_pInfixer->GetBlocksWordsSize();

		// infix block offsets are stored as 32-bit in the header; warn on overflow
		if ( tCtx.m_iInfixBlocksOffset>UINT_MAX )
			sphWarning ( "INTERNAL ERROR: dictionary size " INT64_FMT " overflow at infix save", tCtx.m_iInfixBlocksOffset );
	}

	// flush header
	// mostly for debugging convenience
	// primary storage is in the index wide header
	tWriterDict.PutBlob ( g_sTagDictHeader );
	tWriterDict.ZipInt ( tCtx.m_dCheckpoints.GetLength() );
	tWriterDict.ZipOffset ( tCtx.m_iDictCheckpointsOffset );
	tWriterDict.ZipInt ( m_pTokenizer->GetMaxCodepointLength() );
	tWriterDict.ZipInt ( (DWORD)tCtx.m_iInfixBlocksOffset );
}
// Write the dead row map file (.spm) for the new disk chunk, delegating to the
// global ::WriteDeadRowMap for tCtx.m_iDocinfo rows. Does not touch 'this'
// (hence the "// static" mark).
bool RtIndex_c::WriteDeadRowMap ( SaveDiskDataContext_t & tCtx, CSphString & sError ) // static
{
	CSphString sName = tCtx.m_tFilebase.GetFilename ( SPH_EXT_SPM );

	return ::WriteDeadRowMap ( sName, tCtx.m_iDocinfo, sError );
}
// RAII guard: removes the (partially written) disk chunk files on scope exit
// unless m_bRemoveFiles is cleared, which the caller does once the save succeeds.
struct FilesCleanup_t
{
	IndexFiles_c m_tFiles;
	bool m_bRemoveFiles;

	explicit FilesCleanup_t ( const char * sFilename )
		: m_tFiles ( sFilename )
		, m_bRemoveFiles ( true )
	{}

	~FilesCleanup_t()
	{
		if ( m_bRemoveFiles )
			m_tFiles.UnlinkExisted();
	}
};
// SaveDiskChunk -> SaveDiskData
// RO save RAM chunks from tSegs into new disk chunk (nothing added/released, just disk files created)
// Orchestrates attribute files, dead row map, dictionary/doclists/hitlists and
// the header; on any failure the FilesCleanup_t guard unlinks partial files.
bool RtIndex_c::SaveDiskData ( const char * szFilename, const ConstRtSegmentSlice_t & tSegs, const ChunkStats_t & tStats, CSphString & sError ) const
{
	RTSAVELOG << "SaveDiskData to " << szFilename << ", " << tSegs.GetLength() << " segments";

	FilesCleanup_t tFiles ( szFilename );
	sError = "";

	SaveDiskDataContext_t tCtx ( szFilename, tSegs ); // only RAM segments here in game.
	if ( m_tSettings.m_iMinInfixLen && m_pDict->GetSettings().m_bWordDict )
		tCtx.m_pInfixer = sphCreateInfixBuilder ( m_pTokenizer->GetMaxCodepointLength(), &sError );

	if ( !sError.IsEmpty() )
		return false;

	// PauseCheck("savepause"); // catch if something happened between RtGuard() and actual updates

	// fixme: handle errors
	if ( !WriteAttributes ( tCtx, sError ) )
		return false;

	if ( !WriteDeadRowMap ( tCtx, sError ) )
		return false;

	CSphWriter tWriterDict;
	CSphString sSPI = IndexFileBase_c { szFilename }.GetFilename ( SPH_EXT_SPI );
	if ( !tWriterDict.OpenFile ( sSPI.cstr(), sError ) )
		return false;

	// dict file starts with a single dummy byte, so offset 0 stays "invalid"
	tWriterDict.PutByte ( 1 );

	if ( !WriteDocs ( tCtx, tWriterDict, sError ) )
		return false;

	WriteCheckpoints ( tCtx, tWriterDict );

	tWriterDict.CloseFile();
	if ( tWriterDict.IsError() )
		return false;

	if ( !SaveDiskHeader ( tCtx, tStats, sError ) )
		return false;

	// everything written successfully - keep the files
	tFiles.m_bRemoveFiles = false;
	return true;
}
// Normalize a copy of the RT settings before writing a disk-chunk header:
// hits are stored inline and the step options are pinned to fixed values,
// so every disk chunk produced from RAM segments has a uniform format.
static void FixupIndexSettings ( CSphIndexSettings & tSettings )
{
tSettings.m_eHitFormat = SPH_HIT_FORMAT_INLINE;
tSettings.m_iBoundaryStep = 0;
tSettings.m_iStopwordStep = 1;
tSettings.m_iOvershortStep = 1;
}
// SaveDiskChunk -> SaveDiskData -> SaveDiskHeader
bool RtIndex_c::SaveDiskHeader ( SaveDiskDataContext_t & tCtx, const ChunkStats_t & tStats, CSphString & sError ) const
{
tCtx.m_iDictCheckpoints = tCtx.m_dCheckpoints.GetLength ();
tCtx.m_iInfixCodepointBytes = ( m_tSettings.m_iMinInfixLen && m_pDict->GetSettings ().m_bWordDict )
? m_pTokenizer->GetMaxCodepointLength ()
: 0;
tCtx.m_iTotalBytes = tStats.m_Stats.m_iTotalBytes;
CSphIndexSettings tSettings = m_tSettings;
FixupIndexSettings ( tSettings );
WriteHeader_t tWriteHeader;
tWriteHeader.m_pSettings = &tSettings;
tWriteHeader.m_pSchema = &m_tSchema;
tWriteHeader.m_pTokenizer = m_pTokenizer;
tWriteHeader.m_pDict = m_pDict;
tWriteHeader.m_pFieldFilter = m_pFieldFilter.get();
tWriteHeader.m_pFieldLens = m_dFieldLens.Begin();
CSphString sName;
JsonEscapedBuilder sJson;
IndexWriteHeader ( tCtx, tWriteHeader, sJson, m_bKeywordDict, true );
sName = tCtx.m_tFilebase.GetFilename ( SPH_EXT_SPH );
CSphWriter wrHeaderJson;
if ( !wrHeaderJson.OpenFile ( sName, sError ) )
return false;
wrHeaderJson.PutString ( (Str_t)sJson );
wrHeaderJson.CloseFile();
if ( wrHeaderJson.IsError() )
return false;
assert ( bson::ValidateJson ( sJson.cstr(), &sError ) );
return true;
}
// Persist the index meta (.meta) for transaction iTID with the given disk-chunk id list.
// Uses write-to-temp + rename so readers never observe a half-written meta file.
// No-op when saving is disabled or the index lock is not held.
void RtIndex_c::SaveMeta ( int64_t iTID, VecTraits_T<int> dChunkNames )
{
if ( !m_tSaving.ActiveStateIs ( SaveState_c::ENABLED ) )
return;
// sanity check
if ( m_iLockFD<0 )
return;
// write new meta
auto sMeta = GetFilename ( "meta" );
auto sMetaNew = GetFilename ( "meta.new" );
CSphString sError;
CSphWriter wrMeta;
if ( !wrMeta.OpenFile ( sMetaNew, sError ) )
sphDie ( "failed to open file for meta serialization: %s", sError.cstr() ); // !COMMIT handle this gracefully
WriteMeta ( iTID, dChunkNames, wrMeta );
wrMeta.CloseFile();
// no need to remove old but good meta in case new meta failed to save
if ( wrMeta.IsError() )
{
sphWarning ( "%s", sError.cstr() );
return;
}
// rename: atomically replace the old meta with the new one
if ( sph::rename ( sMetaNew.cstr(), sMeta.cstr() ) )
sphDie ( "failed to rename meta (src=%s, dst=%s, errno=%d, error=%s)",
sMetaNew.cstr(), sMeta.cstr(), errno, strerrorm(errno) ); // !COMMIT handle this gracefully
// mutable (runtime-tunable) settings live in a separate file next to the meta
SaveMutableSettings ( m_tMutableSettings, GetFilename ( SPH_EXT_SETTINGS ) );
}
// Serialize the index meta as a JSON object into wrMeta.
// The key set mirrors what LoadMetaJson() reads back; "meta v.N" comments mark
// at which format version each field was introduced.
void RtIndex_c::WriteMeta ( int64_t iTID, const VecTraits_T<int>& dChunkNames, CSphWriter& wrMeta ) const
{
JsonEscapedBuilder sNewMeta;
sNewMeta.ObjectWBlock();
// human-readable sugar
sNewMeta.NamedString ( "meta_created_time_utc", sphCurrentUtcTime() );
sNewMeta.NamedVal ( "meta_version", META_VERSION );
// sNewMeta.NamedVal ( "index_format_version", INDEX_FORMAT_VERSION );
sNewMeta.NamedVal ( "total_documents", m_tStats.m_iTotalDocuments );
sNewMeta.NamedVal ( "total_bytes", m_tStats.m_iTotalBytes );
sNewMeta.NamedVal ( "tid", iTID );
// meta v.4, save disk index format and settings, too
sNewMeta.NamedVal ( "schema", m_tSchema );
sNewMeta.NamedVal ( "index_settings", m_tSettings );
sNewMeta.Named ( "tokenizer_settings" );
SaveTokenizerSettings ( sNewMeta, m_pTokenizer, m_tSettings.m_iEmbeddedLimit );
sNewMeta.Named ( "dictionary_settings" );
SaveDictionarySettings ( sNewMeta, m_pDict, m_bKeywordDict, m_tSettings.m_iEmbeddedLimit );
// meta v.5
sNewMeta.NamedVal ( "words_checkpoint", m_iWordsCheckpoint);
// meta v.7
sNewMeta.NamedValNonDefault ( "max_codepoint_length", m_iMaxCodepointLength );
// bloom parameters are only written when they differ from the defaults (8 / 2)
sNewMeta.NamedValNonDefault ( "bloom_per_entry_vals_count", BLOOM_PER_ENTRY_VALS_COUNT, 8 );
sNewMeta.NamedValNonDefault ( "bloom_hashes_count", BLOOM_HASHES_COUNT, 2 );
// meta v.11
CSphFieldFilterSettings tFieldFilterSettings;
if ( m_pFieldFilter )
{
m_pFieldFilter->GetSettings(tFieldFilterSettings);
sNewMeta.NamedVal ( "field_filter_settings", tFieldFilterSettings );
}
{
// ids of the disk chunks belonging to this index, as a JSON array
sNewMeta.Named ( "chunk_names" );
auto _ = sNewMeta.Array();
for ( int i : dChunkNames)
sNewMeta << i;
}
// meta v.17+
sNewMeta.NamedVal ( "soft_ram_limit", m_iRtMemLimit );
sNewMeta.FinishBlocks();
wrMeta.PutString ( (Str_t)sNewMeta );
// debug-only sanity: emitted meta must be valid JSON
assert ( bson::ValidateJson ( sNewMeta.cstr() ) );
}
// Convenience overload: save meta for the current TID with the current disk-chunk id set.
void RtIndex_c::SaveMeta()
{
SaveMeta ( m_iTID, GetChunkIds ( *m_tRtChunks.DiskChunks() ) );
}
// looks like spinlock, but actually we switch to parallel strand and back on every tick, so it should not burn CPU
void RtIndex_c::WaitRAMSegmentsUnlocked ( bool bAllowOne ) const
{
TRACE_SCHED ( "rt", "WaitRAMSegmentsUnlocked" );
m_tNSavesNow.Wait ( [bAllowOne] ( int iVal ) { return iVal == ( bAllowOne ? 1 : 0 ); } );
m_tUnLockedSegments.WaitVoid ( [this] { return m_tRtChunks.RamSegs()->none_of ( [] ( const ConstRtSegmentRefPtf_t& a ) { return a->m_iLocked; } ); });
}
// Sum the RAM usage of all RAM segments accepted by the given predicate.
template<typename PRED>
int64_t RtIndex_c::GetMemCount ( PRED&& fnPred ) const
{
int64_t iRam = 0;
auto pSegs = m_tRtChunks.RamSegs();
for ( const RtSegment_t * pSegment : *pSegs )
{
if ( !fnPred ( pSegment ) )
continue;
iRam += pSegment->GetUsedRam();
}
return iRam;
}
// i.e. create new disk chunk from ram segments
bool RtIndex_c::SaveDiskChunk ( bool bForced, bool bEmergent, bool bBootstrap ) REQUIRES ( m_tWorkers.SerialChunkAccess() )
{
if ( !m_tSaving.WaitEnabledOrShutdown() )
return !bBootstrap;
assert ( Coro::CurrentScheduler() == m_tWorkers.SerialChunkAccess() );
RTSAVELOG << "SaveDiskChunk (" << ( bForced ? "forced, " : "not forced, " ) << ( bEmergent ? "emergent, " : "not emergent, " ) << ( bBootstrap ? "bootstrap" : "usual" ) << ")";
m_tNSavesNow.Wait ( [] ( int iVal ) { return iVal < SIMULTANEOUS_SAVE_LIMIT; } );
// we're in serial worker - no concurrency, no race between wait() and modify()
m_tNSavesNow.ModifyValue ( [] ( int& iVal ) { ++iVal; } );
auto tFinallySetSaveUnactive = AtScopeExit ( [this] {
++m_iSaveGeneration;
m_tNSavesNow.ModifyValueAndNotifyAll ( [] ( int& iVal ) { --iVal; } );
} );
const int iSaveOp = m_tWorkers.GetNextOpTicket();
TRACE_SCHED_VARID ( "rt", "SaveDiskChunk", iSaveOp );
// if forced - wait all segments. Otherwise, can continue with subset of currently available segments
// note that segments may be locked by currently executing MergeSegments or SaveDiskChunk. If so, we wait them finished and continue.
// that will cause another disk chunk written right after just finished, since op is forced it is ok.
if ( bForced )
WaitRAMSegmentsUnlocked ( true ); // true means to wait 1, not 0 active saves (as we already increased the counter)
// collect all non-occupied non-empty segments and lock them
int64_t iNotMyOpRAM {0};
int64_t iMyOpRAM {0};
KillAccum_t dKillOnSave;
LazyVector_T<ConstRtSegmentRefPtf_t> dSegments;
for ( const auto & pSeg : *m_tRtChunks.RamSegs () )
{
if ( !pSeg->m_iLocked )
{
pSeg->m_iLocked = iSaveOp;
if ( pSeg->m_tAliveRows.load ( std::memory_order_relaxed )!=0 )
{
pSeg->SetKillHook ( &dKillOnSave );
dSegments.Add ( pSeg );
iMyOpRAM += pSeg->GetUsedRam();
}
} else
iNotMyOpRAM += pSeg->GetUsedRam();
}
AT_SCOPE_EXIT ( [&dSegments] { dSegments.for_each ( [] ( const auto& dSeg ) { dSeg->SetKillHook ( nullptr ); } ); } );
UpdateUnlockedCount();
RTSAVELOG << "SaveDiskChunk process " << dSegments.GetLength() << " segments. Active jobs " << m_tNSavesNow.GetValue() << ", op " << iSaveOp
<< " RAM visible+retired/locked/acquired " << iNotMyOpRAM + iMyOpRAM << "+" << m_iRamChunksAllocatedRAM.load ( std::memory_order_relaxed )- iNotMyOpRAM - iMyOpRAM << "/" << iNotMyOpRAM << "/" << iMyOpRAM;
if ( dSegments.IsEmpty() )
return !bBootstrap;
auto iTID = m_iTID;
m_tSaveTIDS.ModifyValue ( [iTID] ( CSphVector<int64_t>& dSaves ) { dSaves.Add ( iTID ); } );
auto tFinallyRemoveTID = AtScopeExit ( [this, iTID] { m_tSaveTIDS.ModifyValueAndNotifyAll ( [iTID] ( CSphVector<int64_t>& dSaves ) { dSaves.RemoveValue ( iTID ); } ); } );
int64_t tmSaveWall = -sphMicroTimer(); // all time including waiting
int64_t tmSave; // only active time
MEMORY ( MEM_INDEX_RT );
int iChunkID = m_tChunkID.MakeChunkId ( m_tRtChunks );
auto sChunk = GetFilename ( iChunkID );
// as we're going to switch fiber, we need to freeze reliable stat and m_iTID (as they could change)
ChunkStats_t tStats ( m_tStats, m_dFieldLensRam );
std::unique_ptr<CSphIndex> pNewChunk;
while ( true )
{
// as separate subtask we 1-st flush segments to disk, and then load just flushed segment
// if forced, continue to work in the same fiber; otherwise split to merge fiber
ScopedScheduler_c tSaveFiber { bForced ? Coro::CurrentScheduler () : m_tWorkers.SaveSegmentsWorker() };
TRACE_SCHED_VARID ( "rt", "SaveDiskChunk-routine", iSaveOp );
tmSave = -sphMicroTimer();
if ( !SaveDiskData ( sChunk.cstr(), dSegments, tStats, m_sLastError ) )
{
sphWarning ( "rt: table %s failed to save disk chunk %s: %s", GetName(), sChunk.cstr(), m_sLastError.cstr() );
tmSave += sphMicroTimer();
break;
}
// bring new disk chunk online
auto fnFnameBuilder = GetIndexFilenameBuilder ();
StrVec_t dWarnings;
std::unique_ptr<FilenameBuilder_i> pFilenameBuilder;
if ( fnFnameBuilder )
pFilenameBuilder = fnFnameBuilder ( GetName () );
pNewChunk = PreallocDiskChunk ( sChunk, iChunkID, pFilenameBuilder.get (), dWarnings, m_sLastError );
if ( !dWarnings.IsEmpty() )
{
StringBuilder_c sWarningLine ( "; " );
for ( const auto & sItem : dWarnings )
sWarningLine << sItem;
sphWarning ( "rt: table %s save disk chunk %s warnings: %s", GetName(), sChunk.cstr(), sWarningLine.cstr() );
}
tmSave += sphMicroTimer();
break;
}
// here we back into serial fiber. As we're switched, we can't rely on m_iTID and index stats anymore
if ( !pNewChunk )
{
sphWarning ( "rt: table %s failed to load disk chunk after RAM save: %s", GetName(), m_sLastError.cstr () );
return false;
}
// applying postponed kills is ok now, since no other kills would happen as we're in serial fiber.
if ( !dKillOnSave.m_dDocids.IsEmpty() )
{
RTLOGV << "SaveDiskChunk: apply postponed kills";
tmSave -= sphMicroTimer ();
dKillOnSave.m_dDocids.Uniq ();
pNewChunk->KillMulti ( dKillOnSave.m_dDocids );
dKillOnSave.m_dDocids.Reset();
tmSave += sphMicroTimer ();
}
auto dUpdates = GatherUpdates::FromChunksOrSegments ( dSegments );
if ( !dUpdates.IsEmpty () )
{
RTLOGV << "SaveDiskChunk: apply postponed updates";
pNewChunk->UpdateAttributesOffline ( dUpdates );
dUpdates.Reset();
}
int iSegments = dSegments.GetLength ();
dSegments.Reset (); // we don't need them anymore
// update field lengths (offline, will swap under lock)
CSphFixedVector<int64_t> dNewFieldLensRam { 0 };
CSphFixedVector<int64_t> dNewFieldLensDisk { 0 };
if ( m_tSchema.GetAttrId_FirstFieldLen ()>=0 )
{
dNewFieldLensRam.Reset ( SPH_MAX_FIELDS );
dNewFieldLensDisk.Reset ( SPH_MAX_FIELDS );
ARRAY_FOREACH ( i, tStats.m_dFieldLens )
{
dNewFieldLensRam[i] = m_dFieldLensRam[i] - tStats.m_dFieldLens[i];
dNewFieldLensDisk[i] = m_dFieldLensDisk[i] + tStats.m_dFieldLens[i];
}
}
// here is pickpoint: if we save some chunks in parallel, here we *NEED* to be sure, that later is not published before older
// That is about binlog consistency: if we save trx 1-1000 and at the same time 1000-1010, last might finish faster, but it can't be committed immediately,
// as last highest trx will be 1010, and nobody knows, that actually 1-1000 are not yet safe.
BEGIN_SCHED ( "rt", "SaveDiskChunk-wait" ); // iSaveOp as id
m_tSaveTIDS.WaitVoid ( [this, iTID] { return m_tSaveTIDS.GetValueRef().First() == iTID; } );
END_SCHED( "rt" );
int iDiskChunks;
// now new disk chunk is loaded, kills and updates applied - we ready to change global index state now.
{
auto tNewSet = RtWriter();
tNewSet.InitDiskChunks ( RtWriter_c::copy );
tNewSet.m_pNewDiskChunks->Add ( DiskChunk_c::make ( std::move ( pNewChunk ) ) );
SaveMeta ( iTID, GetChunkIds ( *tNewSet.m_pNewDiskChunks ) );
Binlog::NotifyIndexFlush ( iTID, GetName(), Binlog::NoShutdown, Binlog::NoSave );
m_iSavedTID = iTID;
tNewSet.InitRamSegs ( RtWriter_c::empty );
for ( const auto & pSeg : *m_tRtChunks.RamSegs() )
if ( pSeg->m_iLocked!=iSaveOp )
tNewSet.m_pNewRamSegs->Add ( pSeg );
// update field lengths
if ( m_tSchema.GetAttrId_FirstFieldLen ()>=0 )
{
m_dFieldLensRam.SwapData ( dNewFieldLensRam );
m_dFieldLensDisk.SwapData ( dNewFieldLensDisk );
}
iDiskChunks = tNewSet.m_pNewDiskChunks->GetLength();
}
// from this point all readers will see new state of the index.
// if saving caused from loading .ram - we're done (DON't need to abandon .ram file!)
if ( bBootstrap )
return true;
// abandon .ram file
UnlinkRAMChunk ( "SaveDiskChunk" );
m_tmSaved = sphMicroTimer ();
tmSaveWall += m_tmSaved;
StringBuilder_c sInfo;
sInfo.Sprintf ( "rt: table %s: diskchunk %d(%d), segments %d %s saved in %.6D (%.6D) sec", GetName (), iChunkID
, iDiskChunks, iSegments, bForced ? "forcibly" : "", tmSave, tmSaveWall );
// calculate DoubleBuf percent using current save/insert rate
auto iInserted = GetMemCount ( [iSaveOp] ( const auto* pSeg ) { return !pSeg->m_iLocked || pSeg->m_iLocked > iSaveOp; } );
RecalculateRateLimit ( iMyOpRAM, iInserted, bEmergent );
RTSAVELOG << sInfo.cstr() << ", op " << iSaveOp << " RAM saved/new " << iMyOpRAM << "/" << iInserted
<< " Insert ratio is " << m_fSaveRateLimit << " (soft ram limit " << m_iSoftRamLimit << ", rt mem limit " << m_iRtMemLimit << ")";
sInfo << ", RAM saved/new " << iMyOpRAM << "/" << iInserted << " ratio " << m_fSaveRateLimit << " (soft limit " << m_iSoftRamLimit << ", conf limit " << m_iRtMemLimit << ")";
sphInfo ( "%s", sInfo.cstr() );
Preread();
CheckStartAutoOptimize();
return true;
}
// Create and prealloc a disk-chunk index object from the files at sChunk.
// Propagates the RT index's runtime knobs (expansion limit, mutable settings,
// global IDF path) onto the chunk and disables its own binlogging (the RT
// index owns the binlog). Returns nullptr (with sError set) on failure.
std::unique_ptr<CSphIndex> RtIndex_c::PreallocDiskChunk ( const CSphString& sChunk, int iChunk, FilenameBuilder_i * pFilenameBuilder, StrVec_t & dWarnings, CSphString & sError, const char * szName ) const
{
MEMORY ( MEM_INDEX_DISK );
// !COMMIT handle errors gracefully instead of dying
auto pDiskChunk = sphCreateIndexPhrase ( ( szName ? szName : sChunk.cstr() ), sChunk );
if ( !pDiskChunk )
{
sError.SetSprintf ( "disk chunk %s: alloc failed", sChunk.cstr() );
return pDiskChunk;
}
pDiskChunk->m_iExpansionLimit = m_iExpansionLimit;
pDiskChunk->SetMutableSettings ( m_tMutableSettings );
pDiskChunk->SetGlobalIDFPath ( m_sGlobalIDFPath );
pDiskChunk->SetBinlog ( false ); // RT index handles binlog; chunks must not
pDiskChunk->m_iChunk = iChunk;
if ( m_bDebugCheck )
pDiskChunk->SetDebugCheck ( m_bCheckIdDups, -1 );
if ( !pDiskChunk->Prealloc ( m_bPathStripped, pFilenameBuilder, dWarnings ) )
{
sError.SetSprintf ( "disk chunk %s: prealloc failed: %s", sChunk.cstr(), pDiskChunk->GetLastError().cstr() );
pDiskChunk = nullptr; // releases the failed chunk; nullptr signals the error
}
return pDiskChunk;
}
// Load the pre-JSON, binary .meta format. Read order mirrors the legacy writer
// exactly; do not reorder. Returns ParseError_e when the magic doesn't match
// (caller may then try another format), GeneralError_e for real failures.
RtIndex_c::LOAD_E RtIndex_c::LoadMetaLegacy ( FilenameBuilder_i * pFilenameBuilder, bool bStripPath, DWORD & uVersion, bool & bRebuildInfixes, StrVec_t & dWarnings )
{
CSphString sMeta = GetFilename ( "meta" );
m_sLastError = "";
// opened and locked, lets read
CSphAutoreader rdMeta;
if ( !rdMeta.Open ( sMeta, m_sLastError ) )
return LOAD_E::GeneralError_e;
if ( rdMeta.GetDword()!=META_HEADER_MAGIC )
{
m_sLastError.SetSprintf ( "invalid meta file %s", sMeta.cstr() );
return LOAD_E::ParseError_e;
}
uVersion = rdMeta.GetDword();
if ( uVersion==0 || uVersion>META_VERSION )
{
m_sLastError.SetSprintf ( "%s is v.%u, binary is v.%u", sMeta.cstr(), uVersion, META_VERSION );
return LOAD_E::GeneralError_e;
}
// metas older than v.14 require offline conversion
DWORD uMinFormatVer = 14;
if ( uVersion<uMinFormatVer )
{
m_sLastError.SetSprintf ( "tables with meta prior to v.%u are no longer supported (use index_converter tool); %s is v.%u", uMinFormatVer, sMeta.cstr(), uVersion );
return LOAD_E::GeneralError_e;
}
m_tStats.m_iTotalDocuments = rdMeta.GetDword();
m_tStats.m_iTotalBytes = rdMeta.GetOffset();
m_iTID = rdMeta.GetOffset();
// tricky bit
// we started saving settings into .meta from v.4 and up only
// and those reuse disk format version, aka INDEX_FORMAT_VERSION
// anyway, starting v.4, serialized settings take precedence over config
// so different chunks can't have different settings any more
CSphTokenizerSettings tTokenizerSettings;
CSphDictSettings tDictSettings;
CSphEmbeddedFiles tEmbeddedFiles;
// load them settings
DWORD uSettingsVer = rdMeta.GetDword();
CSphSchema tSchema;
ReadSchema ( rdMeta, tSchema, uSettingsVer );
SetSchema ( std::move ( tSchema ) );
LoadIndexSettings ( m_tSettings, rdMeta, uSettingsVer );
if ( !tTokenizerSettings.Load ( pFilenameBuilder, rdMeta, tEmbeddedFiles, m_sLastError ) )
return LOAD_E::GeneralError_e;
{
// dict settings loading is non-fatal on warning; collect it for the caller
CSphString sWarning;
tDictSettings.Load ( rdMeta, tEmbeddedFiles, pFilenameBuilder, sWarning );
if ( !sWarning.IsEmpty() )
dWarnings.Add(sWarning);
}
m_bKeywordDict = tDictSettings.m_bWordDict;
// initialize AOT if needed
DWORD uPrevAot = m_tSettings.m_uAotFilterMask;
m_tSettings.m_uAotFilterMask = sphParseMorphAot ( tDictSettings.m_sMorphology.cstr() );
if ( m_tSettings.m_uAotFilterMask!=uPrevAot )
{
CSphString sWarning;
sWarning.SetSprintf ( "table '%s': morphology option changed from config has no effect, ignoring", GetName() );
dWarnings.Add(sWarning);
}
if ( bStripPath )
{
StripPath ( tTokenizerSettings.m_sSynonymsFile );
ARRAY_FOREACH ( i, tDictSettings.m_dWordforms )
StripPath ( tDictSettings.m_dWordforms[i] );
}
// recreate tokenizer
m_pTokenizer = Tokenizer::Create ( tTokenizerSettings, &tEmbeddedFiles, pFilenameBuilder, dWarnings, m_sLastError );
if ( !m_pTokenizer )
return LOAD_E::GeneralError_e;
// recreate dictionary
m_pDict = sphCreateDictionaryCRC ( tDictSettings, &tEmbeddedFiles, m_pTokenizer, GetName(), bStripPath, m_tSettings.m_iSkiplistBlockSize, pFilenameBuilder, m_sLastError );
if ( !m_sLastError.IsEmpty() )
m_sLastError.SetSprintf ( "table '%s': %s", GetName(), m_sLastError.cstr() );
if ( !m_pDict )
return LOAD_E::GeneralError_e;
// dictionary created but with a message - treat it as a warning, not an error
if ( !m_sLastError.IsEmpty() )
dWarnings.Add(m_sLastError);
Tokenizer::AddToMultiformFilterTo ( m_pTokenizer, m_pDict->GetMultiWordforms () );
m_iWordsCheckpoint = rdMeta.GetDword();
// check that infixes definition changed - going to rebuild infixes
m_iMaxCodepointLength = rdMeta.GetDword();
int iBloomKeyLen = rdMeta.GetByte();
int iBloomHashesCount = rdMeta.GetByte();
bRebuildInfixes = ( iBloomKeyLen!=BLOOM_PER_ENTRY_VALS_COUNT || iBloomHashesCount!=BLOOM_HASHES_COUNT );
if ( bRebuildInfixes )
{
CSphString sWarning;
sWarning.SetSprintf ( "infix definition changed (from len=%d, hashes=%d to len=%d, hashes=%d) - rebuilding...",
(int)BLOOM_PER_ENTRY_VALS_COUNT, (int)BLOOM_HASHES_COUNT, iBloomKeyLen, iBloomHashesCount );
dWarnings.Add(sWarning);
}
std::unique_ptr<ISphFieldFilter> pFieldFilter;
CSphFieldFilterSettings tFieldFilterSettings;
tFieldFilterSettings.Load(rdMeta);
if ( tFieldFilterSettings.m_dRegexps.GetLength() )
pFieldFilter = sphCreateRegexpFilter ( tFieldFilterSettings, m_sLastError );
if ( !sphSpawnFilterICU ( pFieldFilter, m_tSettings, tTokenizerSettings, sMeta.cstr(), m_sLastError ) )
return LOAD_E::GeneralError_e;
SetFieldFilter ( std::move ( pFieldFilter ) );
// disk chunk id list: dword count, then raw ints
int iLen = (int)rdMeta.GetDword();
m_dChunkNames.Reset ( iLen );
rdMeta.GetBytes ( m_dChunkNames.Begin(), iLen*sizeof(int) );
// soft RAM limit appeared in meta v.17
if ( uVersion>=17 )
SetMemLimit ( rdMeta.GetOffset() );
return LOAD_E::Ok_e;
}
// Load the current JSON .meta format (counterpart of WriteMeta()).
// Returns ParseError_e when the file is not valid JSON (caller then falls back
// to the legacy binary loader), GeneralError_e for real failures.
RtIndex_c::LOAD_E RtIndex_c::LoadMetaJson ( FilenameBuilder_i * pFilenameBuilder, bool bStripPath, DWORD & uVersion, bool & bRebuildInfixes, StrVec_t & dWarnings )
{
using namespace bson;
CSphString sMeta = GetFilename ( "meta" );
CSphVector<BYTE> dData;
if ( !sphJsonParse ( dData, sMeta, m_sLastError ) )
return LOAD_E::ParseError_e;
Bson_c tBson ( dData );
if ( tBson.IsEmpty() || !tBson.IsAssoc() )
{
m_sLastError = "Something wrong read from json meta - it is either empty, either not root object.";
return LOAD_E::ParseError_e;
}
// version
uVersion = (DWORD)Int ( tBson.ChildByName ( "meta_version" ) );
if ( uVersion==21 ) uVersion = 20; // fixme! a little hack, m.b. deal another way? v21 is minor of v20
if ( uVersion == 0 || uVersion > META_VERSION )
{
m_sLastError.SetSprintf ( "%s is v.%u, binary is v.%u", sMeta.cstr(), uVersion, META_VERSION );
return LOAD_E::GeneralError_e;
}
// JSON metas older than v.20 are not supported
DWORD uMinFormatVer = 20;
if ( uVersion<uMinFormatVer )
{
m_sLastError.SetSprintf ( "tables with meta prior to v.%u are no longer supported (use index_converter tool); %s is v.%u", uMinFormatVer, sMeta.cstr(), uVersion );
return LOAD_E::GeneralError_e;
}
m_tStats.m_iTotalDocuments = Int ( tBson.ChildByName ( "total_documents" ) );
m_tStats.m_iTotalBytes = Int ( tBson.ChildByName ( "total_bytes" ) );
m_iTID = Int ( tBson.ChildByName ( "tid" ) );
// negative TID marks an index that opted out of binlogging
if ( m_iTID<0 )
m_tSettings.m_bBinlog = false;
// tricky bit
// we started saving settings into .meta from v.4 and up only
// and those reuse disk format version, aka INDEX_FORMAT_VERSION
// anyway, starting v.4, serialized settings take precedence over config
// so different chunks can't have different settings any more
CSphTokenizerSettings tTokenizerSettings;
CSphDictSettings tDictSettings;
CSphEmbeddedFiles tEmbeddedFiles;
// load them settings
// DWORD uSettingsVer = Int ( tBson.ChildByName ( "index_format_version" ) );
CSphSchema tSchema;
ReadSchemaJson ( tBson.ChildByName ( "schema" ), tSchema );
SetSchema ( std::move ( tSchema ) );
LoadIndexSettingsJson ( tBson.ChildByName ( "index_settings" ), m_tSettings );
if ( !tTokenizerSettings.Load ( pFilenameBuilder, tBson.ChildByName ( "tokenizer_settings" ), tEmbeddedFiles, m_sLastError ) )
return LOAD_E::GeneralError_e;
{
// dict settings loading is non-fatal on warning; collect it for the caller
CSphString sWarning;
tDictSettings.Load ( tBson.ChildByName ( "dictionary_settings" ), tEmbeddedFiles, pFilenameBuilder, sWarning );
if ( !sWarning.IsEmpty() )
dWarnings.Add(sWarning);
}
m_bKeywordDict = tDictSettings.m_bWordDict;
// initialize AOT if needed
DWORD uPrevAot = m_tSettings.m_uAotFilterMask;
m_tSettings.m_uAotFilterMask = sphParseMorphAot ( tDictSettings.m_sMorphology.cstr() );
if ( m_tSettings.m_uAotFilterMask!=uPrevAot )
{
CSphString sWarning;
sWarning.SetSprintf ( "table '%s': morphology option changed from config has no effect, ignoring", GetName() );
dWarnings.Add(sWarning);
}
if ( bStripPath )
{
StripPath ( tTokenizerSettings.m_sSynonymsFile );
ARRAY_FOREACH ( i, tDictSettings.m_dWordforms )
StripPath ( tDictSettings.m_dWordforms[i] );
}
// recreate tokenizer
m_pTokenizer = Tokenizer::Create ( tTokenizerSettings, &tEmbeddedFiles, pFilenameBuilder, dWarnings, m_sLastError );
if ( !m_pTokenizer )
return LOAD_E::GeneralError_e;
// recreate dictionary
m_pDict = sphCreateDictionaryCRC ( tDictSettings, &tEmbeddedFiles, m_pTokenizer, GetName(), bStripPath, m_tSettings.m_iSkiplistBlockSize, pFilenameBuilder, m_sLastError );
if ( !m_sLastError.IsEmpty() )
m_sLastError.SetSprintf ( "table '%s': %s", GetName(), m_sLastError.cstr() );
if ( !m_pDict )
return LOAD_E::GeneralError_e;
// dictionary created but with a message - treat it as a warning, not an error
if ( !m_sLastError.IsEmpty() )
dWarnings.Add(m_sLastError);
Tokenizer::AddToMultiformFilterTo ( m_pTokenizer, m_pDict->GetMultiWordforms () );
m_iWordsCheckpoint = (int)Int ( tBson.ChildByName ( "words_checkpoint" ) );
// check that infixes definition changed - going to rebuild infixes
m_iMaxCodepointLength = (int)Int ( tBson.ChildByName ( "max_codepoint_length" ) );
// defaults 8/2 match the NonDefault writer in WriteMeta()
int iBloomKeyLen = (int)Int ( tBson.ChildByName ( "bloom_per_entry_vals_count" ), 8 );
int iBloomHashesCount = (int)Int ( tBson.ChildByName ( "bloom_hashes_count" ), 2 );
bRebuildInfixes = ( iBloomKeyLen!=BLOOM_PER_ENTRY_VALS_COUNT || iBloomHashesCount!=BLOOM_HASHES_COUNT );
if ( bRebuildInfixes )
{
CSphString sWarning;
sWarning.SetSprintf ( "infix definition changed (from len=%d, hashes=%d to len=%d, hashes=%d) - rebuilding...",
(int)BLOOM_PER_ENTRY_VALS_COUNT, (int)BLOOM_HASHES_COUNT, iBloomKeyLen, iBloomHashesCount );
dWarnings.Add(sWarning);
}
std::unique_ptr<ISphFieldFilter> pFieldFilter;
auto tFieldFilterSettingsNode = tBson.ChildByName ( "field_filter_settings" );
if ( !IsNullNode ( tFieldFilterSettingsNode ) )
{
CSphFieldFilterSettings tFieldFilterSettings;
Bson_c ( tFieldFilterSettingsNode ).ForEach ( [&tFieldFilterSettings] ( const NodeHandle_t& tNode ) {
tFieldFilterSettings.m_dRegexps.Add ( String ( tNode ) );
} );
if ( !tFieldFilterSettings.m_dRegexps.IsEmpty() )
pFieldFilter = sphCreateRegexpFilter ( tFieldFilterSettings, m_sLastError );
}
if ( !sphSpawnFilterICU ( pFieldFilter, m_tSettings, tTokenizerSettings, sMeta.cstr(), m_sLastError ) )
return LOAD_E::GeneralError_e;
SetFieldFilter ( std::move ( pFieldFilter ) );
// disk chunk id list
Bson_c tNamesVec { tBson.ChildByName ( "chunk_names" ) };
m_dChunkNames.Reset ( tNamesVec.CountValues() );
int iLastQ = 0;
tNamesVec.ForEach ( [&iLastQ, this] ( const NodeHandle_t& tNode ) { m_dChunkNames[iLastQ++] = (int)Int ( tNode ); });
SetMemLimit ( Int ( tBson.ChildByName ( "soft_ram_limit" ) ) );
// optional field (may be absent in older metas)
auto tIndexId = tBson.ChildByName ( "index_id" );
if ( !IsNullNode ( tIndexId ) )
m_iIndexId = Int ( tIndexId );
return LOAD_E::Ok_e;
}
// Load the .meta file if present: try JSON first, fall back to the legacy
// binary format on a parse error. An absent meta is not an error (fresh index
// with no disk part yet). Returns false only on an unrecoverable load failure.
bool RtIndex_c::LoadMetaImpl ( FilenameBuilder_i * pFilenameBuilder, bool bStripPath, DWORD & uVersion, bool & bRebuildInfixes, StrVec_t & dWarnings )
{
// check if we have a meta file (kinda-header)
CSphString sMeta = GetFilename ( "meta" );
// no readable meta? no disk part yet
if ( !sphIsReadable ( sMeta.cstr() ) )
return true;
auto eRes = LoadMetaJson ( pFilenameBuilder, bStripPath, uVersion, bRebuildInfixes, dWarnings );
if ( eRes == LOAD_E::ParseError_e )
{
sphInfo ( "Index meta format is not json, will try it as binary..." );
eRes = LoadMetaLegacy ( pFilenameBuilder, bStripPath, uVersion, bRebuildInfixes, dWarnings );
if ( eRes == LOAD_E::ParseError_e )
{
sphWarning ( "Unable to parse header... Error %s", m_sLastError.cstr() );
return false;
}
}
if ( eRes == LOAD_E::GeneralError_e )
{
sphWarning ( "Unable to load header... Error %s", m_sLastError.cstr() );
return false;
}
assert ( eRes == LOAD_E::Ok_e );
return true;
}
// Wrapper over LoadMetaImpl(): on failure, optionally dumps the corrupt meta
// for post-mortem analysis when the 'dump_corrupt_meta' env var is set.
bool RtIndex_c::LoadMeta ( FilenameBuilder_i* pFilenameBuilder, bool bStripPath, DWORD& uVersion, bool& bRebuildInfixes, StrVec_t& dWarnings )
{
if ( LoadMetaImpl ( pFilenameBuilder, bStripPath, uVersion, bRebuildInfixes, dWarnings ) )
return true;
const char* szDumpPath = getenv ( "dump_corrupt_meta" );
if ( !szDumpPath )
return false;
CSphString sMeta = GetFilename ( "meta" );
// NOTE(review): plain concatenation - env var apparently must end with a
// path separator for the dump to land inside the directory; confirm contract
CSphString sDestPath = SphSprintf ( "%s%s", szDumpPath, "index.meta" );
CSphString sError;
if ( !CopyFile ( sMeta, sDestPath, sError ) )
sphWarning ( "%s", sError.cstr() );
return false;
}
// Prealloc every disk chunk listed in m_dChunkNames (read from meta) and
// publish them via an RtWriter transaction. Also validates that each chunk's
// match schema equals the RT schema and accumulates per-field length stats.
bool RtIndex_c::PreallocDiskChunks ( FilenameBuilder_i * pFilenameBuilder, StrVec_t & dWarnings ) NO_THREAD_SAFETY_ANALYSIS
{
// load disk chunks, if any
auto tWriter = RtWriter();
tWriter.InitDiskChunks ( RtWriter_c::empty );
ARRAY_FOREACH ( iName, m_dChunkNames )
{
int iChunkIndex = m_dChunkNames[iName];
CSphString sChunk = GetFilename ( iChunkIndex );
auto pChunk = DiskChunk_c::make ( PreallocDiskChunk ( sChunk, iChunkIndex, pFilenameBuilder, dWarnings, m_sLastError ) );
if ( !pChunk )
return false;
auto* pIndex = (const CSphIndex*)*pChunk;
// tricky bit
// outgoing match schema on disk chunk should be identical to our internal (!) schema
if ( !m_tSchema.CompareTo ( pIndex->GetMatchSchema(), m_sLastError, true, true ) )
return false;
tWriter.m_pNewDiskChunks->Add ( pChunk );
// update field lengths
if ( m_tSchema.GetAttrId_FirstFieldLen()>=0 )
{
int64_t * pLens = pIndex->GetFieldLens();
if ( pLens )
for ( int i=0; i < pIndex->GetMatchSchema().GetFieldsCount(); ++i )
m_dFieldLensDisk[i] += pLens[i];
}
}
// chunk name list is consumed; clear it so it is not reused
m_dChunkNames.Reset(0);
return true;
}
// Full index startup: take the .lock file, load meta and mutable settings,
// prealloc all disk chunks and the RAM chunk, then start background workers.
// Sets m_bPreallocPassedOk which gates later operations.
bool RtIndex_c::Prealloc ( bool bStripPath, FilenameBuilder_i * pFilenameBuilder, StrVec_t & dWarnings )
{
MEMORY ( MEM_INDEX_RT );
// locking uber alles
// in RT backend case, we just must be multi-threaded
// so we simply lock here, and ignore Lock/Unlock hassle caused by forks
assert ( m_iLockFD<0 );
m_bPreallocPassedOk = false;
CSphString sLock = GetFilename ( "lock" );
m_iLockFD = ::open ( sLock.cstr(), SPH_O_NEW, 0644 );
if ( m_iLockFD<0 )
{
m_sLastError.SetSprintf ( "failed to open %s: %s", sLock.cstr(), strerrorm(errno) );
return false;
}
if ( !sphLockEx ( m_iLockFD, false ) )
{
// in debug-check mode a failed lock is tolerated (read-only inspection)
SafeClose ( m_iLockFD );
if ( !m_bDebugCheck )
{
m_sLastError.SetSprintf ( "failed to lock %s: %s", sLock.cstr(), strerrorm(errno) );
return false;
}
}
DWORD uVersion = 0;
bool bRebuildInfixes = false;
if ( !LoadMeta ( pFilenameBuilder, bStripPath, uVersion, bRebuildInfixes, dWarnings ) )
return false;
CSphString sMutableFile = GetFilename ( SPH_EXT_SETTINGS );
m_tMutableSettings.m_iMemLimit = m_iRtMemLimit; // to avoid overriding value from meta by default value, if no settings provided
if ( !m_tMutableSettings.Load ( sMutableFile.cstr(), GetName() ) )
return false;
SetMemLimit ( m_tMutableSettings.m_iMemLimit );
if ( m_tMutableSettings.IsSet ( MutableName_e::GLOBAL_IDF ) )
m_sGlobalIDFPath = m_tMutableSettings.m_sGlobalIDFPath;
m_bPathStripped = bStripPath;
if ( m_tSchema.HasColumnarAttrs() && !IsColumnarLibLoaded() )
{
m_sLastError.SetSprintf ( "failed to load table with columnar attributes without columnar library" );
return false;
}
if ( m_bDebugCheck )
{
// load ram chunk
m_bPreallocPassedOk = LoadRamChunk ( uVersion, bRebuildInfixes, false );
return m_bPreallocPassedOk;
}
// spin up fibers and switch onto the serial chunk-access one for the rest of the load
m_tWorkers.InitWorkers();
ScopedScheduler_c tSerialFiber ( m_tWorkers.SerialChunkAccess() );
TRACE_SCHED ( "rt", "Prealloc" );
if ( !PreallocDiskChunks ( pFilenameBuilder, dWarnings ) )
return false;
// load ram chunk
m_bPreallocPassedOk = LoadRamChunk ( uVersion, bRebuildInfixes );
// field lengths
ARRAY_FOREACH ( i, m_dFieldLens )
m_dFieldLens[i] = m_dFieldLensDisk[i] + m_dFieldLensRam[i];
// set up values for on timer save
m_iSavedTID = m_iTID;
m_tmSaved = sphMicroTimer();
// neet to set m_iSoftRamLimit more than iUsedRam to prevent flush of disk chunk right after index load
int64_t iUsedRam = SegmentsGetUsedRam ( *m_tRtChunks.RamSegs() );
RecalculateRateLimit ( iUsedRam, 1, false );
RunMergeSegmentsWorker();
return m_bPreallocPassedOk;
}
// Warm up every disk chunk by prereading it; stops early on daemon shutdown.
void RtIndex_c::Preread ()
{
auto pDiskChunks = m_tRtChunks.DiskChunks();
for ( auto & pDiskChunk : *pDiskChunks )
{
if ( pDiskChunk )
pDiskChunk->CastIdx().Preread();
if ( sphInterrupted() )
return;
}
}
// Validate a vector length read from disk: it must be non-negative and below
// both the given bound (typically the file size) and the policy's hard cap.
// Sets sError and returns false when the length is out of range.
template<typename P>
static bool CheckVectorLength ( int iLen, int64_t iMinLen, const char * sAt, CSphString & sError )
{
auto iUpperBound = Min ( iMinLen, P::SANE_SIZE );
if ( iLen<0 || iLen>=iUpperBound )
{
sError.SetSprintf ( "broken table, %s length overflow (len=%d, max=" INT64_FMT ")", sAt, iLen, iUpperBound );
return false;
}
return true;
}
// Serialize a vector of trivially-copyable elements: dword count, then raw bytes.
// Counterpart of LoadVector() below.
template < typename T >
static void SaveVector ( CSphWriter & tWriter, const VecTraits_T < T > & tVector )
{
STATIC_ASSERT ( IS_TRIVIALLY_COPYABLE(T), NON_TRIVIAL_VECTORS_ARE_UNSERIALIZABLE );
int iCount = tVector.GetLength();
tWriter.PutDword ( iCount );
if ( iCount>0 )
tWriter.PutBytes ( tVector.Begin(), tVector.GetLengthBytes() );
}
// Deserialize a vector written by SaveVector(): read the dword count, sanity-check
// it against iMinLen and the policy cap, then bulk-read the raw element bytes.
template < typename T, typename P >
static bool LoadVector ( CSphReader & tReader, CSphVector < T, P > & tVector,
int64_t iMinLen, const char * sAt, CSphString & sError )
{
STATIC_ASSERT ( IS_TRIVIALLY_COPYABLE(T), NON_TRIVIAL_VECTORS_ARE_UNSERIALIZABLE );
auto iCount = (int)tReader.GetDword();
if ( !CheckVectorLength<P> ( iCount, iMinLen, sAt, sError ) )
return false;
tVector.Resize ( iCount );
if ( iCount>0 )
tReader.GetBytes ( tVector.Begin(), (int)tVector.GetLengthBytes() );
return true;
}
// Serialize one RAM segment into the .ram file. The write order here IS the
// on-disk format and must stay in sync with the corresponding loader.
// Caller must hold a shared lock on the segment (enforced via REQUIRES_SHARED).
void RtIndex_c::SaveRamSegment ( const RtSegment_t* pSeg, CSphWriter& wrChunk ) const REQUIRES_SHARED ( pSeg->m_tLock )
{
wrChunk.PutDword ( pSeg->m_uRows );
wrChunk.PutDword ( (DWORD)pSeg->m_tAliveRows.load ( std::memory_order_relaxed ) );
wrChunk.PutDword ( 0 ); // reserved / padding dword
SaveVector ( wrChunk, pSeg->m_dWords );
// keyword checkpoints carry actual strings only in keyword-dict mode
if ( m_bKeywordDict )
SaveVector ( wrChunk, pSeg->m_dKeywordCheckpoints );
auto pCheckpoints = (const char*)pSeg->m_dKeywordCheckpoints.Begin();
wrChunk.PutDword ( pSeg->m_dWordCheckpoints.GetLength() );
for ( const auto& dWordCheckpoint : pSeg->m_dWordCheckpoints )
{
wrChunk.PutOffset ( dWordCheckpoint.m_iOffset );
// keyword dict stores the word as an offset into the checkpoint string pool;
// crc dict stores the word id directly
if ( m_bKeywordDict )
wrChunk.PutOffset ( dWordCheckpoint.m_szWord - pCheckpoints );
else
wrChunk.PutOffset ( dWordCheckpoint.m_uWordID );
}
SaveVector ( wrChunk, pSeg->m_dDocs );
SaveVector ( wrChunk, pSeg->m_dHits );
SaveVector ( wrChunk, pSeg->m_dRows );
pSeg->m_tDeadRowMap.Save ( wrChunk );
SaveVector ( wrChunk, pSeg->m_dBlobs );
if ( pSeg->m_pDocstore )
pSeg->m_pDocstore->Save ( wrChunk );
// columnar storage is optional; a leading flag byte says whether it follows
wrChunk.PutByte ( pSeg->m_pColumnar ? 1 : 0 );
if ( pSeg->m_pColumnar )
pSeg->m_pColumnar->Save ( wrChunk );
// infixes
SaveVector ( wrChunk, pSeg->m_dInfixFilterCP );
}
// Persist the per-field length counters of the RAM chunk: a DWORD field
// count followed by one offset-sized counter per field.
void RtIndex_c::SaveRamFieldLengths ( CSphWriter& wrChunk ) const
{
	int iFields = m_tSchema.GetFieldsCount();
	wrChunk.PutDword ( iFields );
	for ( int i = 0; i < iFields; ++i )
		wrChunk.PutOffset ( m_dFieldLensRam[i] );
}
// Persist all RAM segments into the '.ram' file. Writes into '.ram.new'
// first and renames over the old file, so a crash mid-save cannot corrupt
// the previous chunk. Returns false if saving is disabled or a write fails.
bool RtIndex_c::SaveRamChunk ()
{
	// saving may be administratively disabled (e.g. during debug check)
	if ( !m_tSaving.ActiveStateIs ( SaveState_c::ENABLED ) )
		return false;

	MEMORY ( MEM_INDEX_RT );

	CSphString sChunk = GetFilename ( "ram" );
	CSphString sNewChunk = GetFilename ( "ram.new" );

	CSphWriter wrChunk;
	if ( !wrChunk.OpenFile ( sNewChunk, m_sLastError ) )
		return false;

	auto pSegments = m_tRtChunks.RamSegs();
	auto& dSegments = *pSegments;
	wrChunk.PutDword ( 0 ); // reserved dword; discarded by LoadRamChunk()
	wrChunk.PutDword ( dSegments.GetLength() );

	// each segment is serialized under its own shared lock
	for ( const RtSegment_t * pSeg : dSegments )
	{
		SccRL_t rLock ( pSeg->m_tLock );
		SaveRamSegment ( pSeg, wrChunk );
	}

	SaveRamFieldLengths ( wrChunk );

	wrChunk.CloseFile();
	if ( wrChunk.IsError() )
		return false;

	// atomically replace the old ram chunk with the fully-written new one
	if ( sph::rename ( sNewChunk.cstr(), sChunk.cstr() ) )
		sphDie ( "failed to rename ram chunk (src=%s, dst=%s, errno=%d, error=%s)",
			sNewChunk.cstr(), sChunk.cstr(), errno, strerrorm(errno) ); // !COMMIT handle this gracefully

	return true;
}
// Load the '.ram' file written by SaveRamChunk(). A missing file is not an
// error (fresh index). When bFixup is set and the chunk contains an abnormal
// number of segments, a "safe load" path merges them down pairwise before
// adopting them. Optionally rebuilds infix filters (bRebuildInfixes).
bool RtIndex_c::LoadRamChunk ( DWORD uVersion, bool bRebuildInfixes, bool bFixup ) NO_THREAD_SAFETY_ANALYSIS
{
	MEMORY ( MEM_INDEX_RT );

	CSphString sChunk = GetFilename ( "ram" );

	// no ram chunk on disk: nothing to load, and that is fine
	if ( !sphIsReadable ( sChunk.cstr(), &m_sLastError ) )
		return true;

	m_bHasFiles = true;

	CSphAutoreader rdChunk;
	if ( !rdChunk.Open ( sChunk, m_sLastError ) )
		return false;

	// the file size serves as the upper bound for all vector-length checks
	int64_t iFileSize = rdChunk.GetFilesize();

	bool bHasMorphology = ( m_pDict && m_pDict->HasMorphology() ); // fresh and old-format index still has no dictionary at this point

	rdChunk.GetDword (); // reserved dword, written as 0 by SaveRamChunk()
	auto iSegmentCount = (int) rdChunk.GetDword();
	if ( !CheckVectorLength<RtSegVec_c::BASE> ( iSegmentCount, iFileSize, "ram-chunks", m_sLastError ) )
		return false;

	CSphVector<RtSegmentRefPtf_t> dRawSegments;
	// 'safe load': repair path for chunks with too many segments to tolerate
	bool bSafeLoad = bFixup && iSegmentCount > MAX_TOLERATE_LOAD_SEGMENTS;
	if ( bSafeLoad )
		sphWarning ( "RAM chunk has %d segments, need to be repaired...", iSegmentCount );
	int iEmpty=0;		// totally-killed segments dropped during safe load
	DWORD uAlive = 0;	// total alive rows across kept segments

	auto tWriter = RtWriter();
	tWriter.InitRamSegs ( RtWriter_c::empty );
	for ( int i = 0; i < iSegmentCount; ++i )
	{
		// per-segment payload; read order mirrors SaveRamSegment()'s write order
		DWORD uRows = rdChunk.GetDword();

		RtSegmentRefPtf_t pSeg { new RtSegment_t ( uRows, m_tSchema ) };

		pSeg->m_tAliveRows.store ( rdChunk.GetDword (), std::memory_order_relaxed );
		rdChunk.GetDword (); // reserved dword

		if ( !LoadVector ( rdChunk, pSeg->m_dWords, iFileSize, "ram-words", m_sLastError ) )
			return false;

		if ( m_bKeywordDict && !LoadVector ( rdChunk, pSeg->m_dKeywordCheckpoints, iFileSize, "ram-checkpoints", m_sLastError ) )
			return false;

		// word checkpoints store either an offset into the keyword blob
		// (keyword dict, resolved back to a pointer here) or a raw word id
		auto * pCheckpoints = (const char *)pSeg->m_dKeywordCheckpoints.Begin();

		auto iCheckpointCount = (int) rdChunk.GetDword();
		if ( !CheckVectorLength<decltype( pSeg->m_dWordCheckpoints)> ( iCheckpointCount, iFileSize, "ram-checkpoints", m_sLastError ) )
			return false;

		pSeg->m_dWordCheckpoints.Resize ( iCheckpointCount );
		for ( auto& tWordCheckpoint : pSeg->m_dWordCheckpoints )
		{
			tWordCheckpoint.m_iOffset = (int)rdChunk.GetOffset();
			SphOffset_t uOff = rdChunk.GetOffset();
			if ( m_bKeywordDict )
				tWordCheckpoint.m_szWord = pCheckpoints + uOff;
			else
				tWordCheckpoint.m_uWordID = (SphWordID_t)uOff;
		}

		if ( !LoadVector ( rdChunk, pSeg->m_dDocs, iFileSize, "ram-doclist", m_sLastError ) )
			return false;

		if ( !LoadVector ( rdChunk, pSeg->m_dHits, iFileSize, "ram-hitlist", m_sLastError ) )
			return false;

		if ( !LoadVector ( rdChunk, pSeg->m_dRows, iFileSize, "ram-attributes", m_sLastError ) )
			return false;

		pSeg->m_tDeadRowMap.Load ( uRows, rdChunk, m_sLastError );

		if ( !LoadVector ( rdChunk, pSeg->m_dBlobs, iFileSize, "ram-blobs", m_sLastError ) )
			return false;

		// docstore presence is implied by format version + schema (no flag on disk)
		if ( uVersion>=15 && ( m_tSchema.HasStoredFields() || m_tSchema.HasStoredAttrs() ) )
		{
			pSeg->m_pDocstore = CreateDocstoreRT();
			SetupDocstoreFields ( *pSeg->m_pDocstore, m_tSchema );
			assert ( pSeg->m_pDocstore );
			if ( !pSeg->m_pDocstore->Load ( rdChunk ) )
				return false;
		}

		// columnar storage, guarded by an explicit presence byte
		if ( uVersion>=19 && rdChunk.GetByte() )
		{
			pSeg->m_pColumnar = CreateColumnarRT ( m_tSchema, rdChunk, m_sLastError );
			if ( !pSeg->m_pColumnar )
				return false;
		}

		// infixes
		if ( !LoadVector ( rdChunk, pSeg->m_dInfixFilterCP, iFileSize, "ram-infixes", m_sLastError ) )
			return false;

		if ( bRebuildInfixes )
			BuildSegmentInfixes ( pSeg, bHasMorphology, m_bKeywordDict, m_tSettings.m_iMinInfixLen, m_iWordsCheckpoint, ( m_iMaxCodepointLength>1 ), m_tSettings.m_eHitless );

		pSeg->BuildDocID2RowIDMap(m_tSchema);
		CheckSegmentConsistency ( pSeg );

		if ( bSafeLoad )
		{
			// safe load keeps segments aside for the merge pass below,
			// dropping those with no alive rows at all
			int64_t iAlive = pSeg->m_tAliveRows.load ( std::memory_order_relaxed );
			if ( iAlive )
			{
				uAlive += iAlive;
				dRawSegments.Add ( pSeg );
			} else // skip that dead guy...
				++iEmpty;
		} else
			tWriter.m_pNewRamSegs->Add ( AdoptSegment ( pSeg ) );
	}

	if ( bSafeLoad )
	{
		iSegmentCount = dRawSegments.GetLength();
		sphInfo ( "RAM chunk repairing %d segments (%d totally killed segments dropped)", iSegmentCount, iEmpty );

		// ideal rt-chunk structure (model) - rows per segment, sorted desc.
		// 1024 1024 1024 ... 1024 512 256 128 64
		// |______ MAX_SEGMENTS _________________|
		// .. |_PROGRESSION_|
		//
		// All 'progression' segments together have about same N of rows as one non-progression,
		// we can consider that for same-sized segments we should distribute all rows over (MAX_SEGMENTS-1) segments
		// Minimal segment, in turn, is 2^PROGRESSION times smaller than typical for distribution.
		// For simplicity, let's run merge with stop on any of 2 criterias: enough rows in segments, and small enough total N of segments.
		// In 'ideal' case we will end with exactly 24 (that is MAX_SEGMENTS - MAX_PROGRESSION_SEGMENT) of maximal size, and no progression.
		// In 'real' case we most probably end with about 96 segments with different sizes, and they'll be finished to ideal 24..32 by usual route.
		auto uTargetRows = uAlive / ( MAX_SEGMENTS - MAX_PROGRESSION_SEGMENT );
		// NOTE(review): %d is used with unsigned (DWORD) args here - works on the
		// supported platforms, but %u would be the strictly correct conversion
		sphInfo ( "RAM chunk repairing of %d segments with %d rows. Achieve min segment with %d rows", iSegmentCount, uAlive, uTargetRows );
		auto iAliveSegments = iSegmentCount;
		CSphVector<int> dSegNums;
		dSegNums.Reserve ( iAliveSegments );
		dSegNums.Add ( 0 ); // that is for very first step

		int iPass = 0;
		// repeat pairwise merges until the smallest segment is big enough,
		// or the total count drops under the tolerance threshold
		while ( dRawSegments[dSegNums.First()]->m_tAliveRows.load ( std::memory_order_relaxed ) < uTargetRows && iAliveSegments >= MAX_TOLERATE_LOAD_SEGMENTS )
		{
			// collect indexes of still-live segments and sort them by alive rows asc
			dSegNums.Resize ( iAliveSegments );
			for ( int i = 0, j = 0; i < iSegmentCount; ++i )
			{
				if ( dRawSegments[i] )
					dSegNums[j++] = i;
			}
			dSegNums.Sort ( Lesser ( [&dRawSegments] ( int a, int b ) { return dRawSegments[a]->m_tAliveRows.load ( std::memory_order_relaxed ) < dRawSegments[b]->m_tAliveRows.load ( std::memory_order_relaxed ); } ) );
			bool bMergeHappened = false;
			sphInfo ( "RAM chunk repairing pass %d (%d segments)", iPass+1, iAliveSegments );
			// merge adjacent pairs of the smallest segments
			for ( int i = 0, iMax = iAliveSegments - 1; i < iMax; i += 2 )
			{
				auto& pA = dRawSegments[dSegNums[i]];
				auto& pB = dRawSegments[dSegNums[i+1]];
				if ( pB->m_tAliveRows.load ( std::memory_order_relaxed ) >= uTargetRows )
					break;
				auto eDecision = CheckSegmentsPair ( { pA, pB } );
				if ( eDecision == CheckMerge_e::MERGE )
				{
					RtSegmentRefPtf_t pMerged { MergeTwoSegments ( pA, pB ) };
					assert ( pMerged );
					pA = pMerged;
					pB = nullptr; // freed slot; compacted out on the next pass
					--iAliveSegments;
					bMergeHappened = true;
				} // else Fixme! Case FLUSH
			}
			if ( !bMergeHappened ) // that could happen if it was no positive merge decision. For example, if all segments are huge and so can't be merged at all
				break;
			++iPass;
		}
		sphInfo ( "RAM chunk repairing: min segment %d achieved in %d passes; now %d segments left", uTargetRows, iPass, iAliveSegments );
		// adopt the surviving segments
		for ( auto& pSeg : dRawSegments )
			if ( pSeg )
				tWriter.m_pNewRamSegs->Add ( AdoptSegment ( pSeg ) );
		dRawSegments.Reset();
		sphWarning ( "RAM chunk has %d segments after repairing. You need to flush this table, otherwise result could be LOST, and repairing will start again on daemon's restart", tWriter.m_pNewRamSegs->GetLength() );
	}

	// field lengths
	auto iFields = (int) rdChunk.GetDword();
	assert ( iFields==m_tSchema.GetFieldsCount() );

	for ( int i=0; i<iFields; ++i )
		m_dFieldLensRam[i] = rdChunk.GetOffset();

	// all done
	return !rdChunk.GetErrorFlag();
}
// Finish index setup after tokenizer/dictionary are attached: docstore field
// map, bigram word list, indexing tokenizer clone, hitless words, and initial
// file creation for an index freshly defined in the config.
void RtIndex_c::PostSetup()
{
	RtIndex_i::PostSetup();

	// docstore field map, only when the schema actually stores something
	std::unique_ptr<DocstoreFields_i> pDocstoreFields;
	if ( m_tSchema.HasStoredFields() || m_tSchema.HasStoredAttrs() )
	{
		pDocstoreFields = CreateDocstoreFields();
		SetupDocstoreFields ( *pDocstoreFields, m_tSchema );
	}
	m_pDocstoreFields = std::move ( pDocstoreFields );

	m_iMaxCodepointLength = m_pTokenizer->GetMaxCodepointLength();

	// bigram filter: tokenize the configured word list into m_dBigramWords
	// (only for the modes that use an explicit word list)
	if ( m_tSettings.m_eBigramIndex!=SPH_BIGRAM_NONE && m_tSettings.m_eBigramIndex!=SPH_BIGRAM_ALL )
	{
		m_pTokenizer->SetBuffer ( (BYTE*)const_cast<char*> ( m_tSettings.m_sBigramWords.cstr() ), m_tSettings.m_sBigramWords.Length() );

		BYTE * pTok = nullptr;
		while ( ( pTok = m_pTokenizer->GetToken() )!=nullptr )
			m_tSettings.m_dBigramWords.Add() = (const char*)pTok;

		m_tSettings.m_dBigramWords.Sort();
	}

	// FIXME!!! handle error
	m_pTokenizerIndexing = m_pTokenizer->Clone ( SPH_CLONE_INDEX );
	Tokenizer::AddBigramFilterTo ( m_pTokenizerIndexing, m_tSettings.m_eBigramIndex, m_tSettings.m_sBigramWords, m_sLastError );

	// hitless words: the file name may need expansion via the filename builder
	CSphString sHitlessFiles = m_tSettings.m_sHitlessFiles;
	if ( GetIndexFilenameBuilder() )
	{
		std::unique_ptr<FilenameBuilder_i> pFilenameBuilder = GetIndexFilenameBuilder() ( GetName() );
		if ( pFilenameBuilder )
			sHitlessFiles = pFilenameBuilder->GetFullPath ( sHitlessFiles );
	}

	// a hitless-words load failure is deliberately only a warning, not fatal
	if ( !LoadHitlessWords ( sHitlessFiles, m_pTokenizerIndexing, m_pDict, m_dHitlessWords, m_sLastError ) )
		sphWarning ( "table '%s': %s", GetName(), m_sLastError.cstr() );

	// still need index files for index just created from config
	if ( !m_bHasFiles )
	{
		SaveRamChunk ();
		SaveMeta ();
	}
}
// Minimal DebugCheckReader_i over an in-memory byte range [pData, pDataEnd);
// lets the generic attribute/blob debug checks run against RAM-segment storage.
struct MemoryDebugCheckReader_c : public DebugCheckReader_i
{
	MemoryDebugCheckReader_c ( const BYTE * pData, const BYTE * pDataEnd )
		: m_pData ( pData )
		, m_pDataEnd ( pDataEnd )
		, m_pCur ( pData )
	{}

	// total buffer size, in bytes
	int64_t GetLengthBytes () override
	{
		return ( m_pDataEnd - m_pData );
	}

	// copy iSize bytes from the cursor into pData and advance; returns false
	// on any out-of-range request without touching the cursor
	bool GetBytes ( void * pData, int iSize ) override
	{
		// reject negative sizes explicitly: the pointer arithmetic below would
		// otherwise move backwards yet still satisfy the range check
		if ( !m_pCur || iSize<0 )
			return false;

		if ( m_pCur+iSize>m_pDataEnd )
			return false;

		memcpy ( pData, m_pCur, iSize );
		m_pCur += iSize;
		return true;
	}

	// reposition the cursor at absolute offset iOff; iHint is unused
	bool SeekTo ( int64_t iOff, int iHint ) final
	{
		// iOff>=0 guard added: a negative offset previously passed the
		// upper-bound-only check and left the cursor before the buffer start
		if ( !m_pData || iOff<0 || m_pData+iOff>=m_pDataEnd )
			return false;

		m_pCur = m_pData + iOff;
		return true;
	}

	const BYTE * m_pData = nullptr;		// buffer start
	const BYTE * m_pDataEnd = nullptr;	// one past the last byte
	const BYTE * m_pCur = nullptr;		// current read position
};
// Top-level integrity check for the RT index: validates global invariants
// (stride, RAM limit, TIDs, checkpoint step), the schema, the RAM chunk
// (unless a single disk chunk was requested), and all disk chunks.
// Returns the combined number of failures.
int RtIndex_c::DebugCheck ( DebugCheckError_i& tReporter, FilenameBuilder_i * )
{
	// FIXME! remove copypasted code from CSphIndex_VLN::DebugCheck
	if ( m_iLockFD<0 && m_iCheckChunk==-1 )
		sphWarning ( "failed to load RAM chunks, checking only %d disk chunks", m_dChunkNames.GetLength() );

	if ( m_iStride!=m_tSchema.GetRowSize() )
		tReporter.Fail ( "wrong attribute stride (current=%d, should_be=%d)", m_iStride, m_tSchema.GetRowSize() );

	if ( m_iRtMemLimit<=0 )
		tReporter.Fail ( "wrong RAM limit (current=" INT64_FMT ")", m_iRtMemLimit );

	// -1 is a valid value ('no binlog'); the three TID invariants below are
	// independent, so each is checked unconditionally (previously the last two
	// were nested under the first and got skipped whenever TID itself was valid)
	if ( m_iTID<-1 )
		tReporter.Fail ( "table TID < -1 (current=" INT64_FMT ")", m_iTID );

	if ( m_iSavedTID<-1 )
		tReporter.Fail ( "table saved TID < -1 (current=" INT64_FMT ")", m_iSavedTID );

	if ( m_iTID<m_iSavedTID )
		tReporter.Fail ( "table TID < table saved TID (current=" INT64_FMT ", saved=" INT64_FMT ")", m_iTID, m_iSavedTID );

	// report the actual expected value instead of a hard-coded "48"
	if ( m_iWordsCheckpoint!=RTDICT_CHECKPOINT_V5 )
		tReporter.Fail ( "unexpected number of words per checkpoint (expected %d, got %d)", RTDICT_CHECKPOINT_V5, m_iWordsCheckpoint );

	tReporter.Msg ( "checking schema..." );
	DebugCheckSchema ( m_tSchema, tReporter );

	if ( m_iCheckChunk==-1 )
		DebugCheckRam ( tReporter );

	int iFailsPlain = DebugCheckDisk ( tReporter );

	tReporter.Done();
	return int ( tReporter.GetNumFails() + iFailsPlain );
}
// Exhaustive verification of a single RAM segment: walks the in-memory
// dictionary (prefix-compressed keywords or wordid deltas), follows every
// doclist and hitlist, recomputes the word checkpoints, and finally checks
// attribute rows, blobs and the dead-row map. All defects go to tReporter.
void RtIndex_c::DebugCheckRamSegment ( const RtSegment_t & tSegment, int iSegment, DebugCheckError_i & tReporter ) const NO_THREAD_SAFETY_ANALYSIS
{
	if ( !tSegment.m_uRows )
	{
		tReporter.Fail ( "empty RT segment (segment=%d)", iSegment );
		return;
	}

	// raw cursors over the three packed streams of the segment
	const BYTE * pCurWord = tSegment.m_dWords.Begin();
	const BYTE * pMaxWord = pCurWord+tSegment.m_dWords.GetLength();
	const BYTE * pCurDoc = tSegment.m_dDocs.Begin();
	const BYTE * pMaxDoc = pCurDoc+tSegment.m_dDocs.GetLength();
	const BYTE * pCurHit = tSegment.m_dHits.Begin();
	const BYTE * pMaxHit = pCurHit+tSegment.m_dHits.GetLength();

	// checkpoints recomputed during the walk; compared to the stored ones at the end
	CSphVector<RtWordCheckpoint_t> dRefCheckpoints;
	int nWordsRead = 0;
	int nCheckpointWords = 0;
	int iCheckpointOffset = 0;
	SphWordID_t uPrevWordID = 0;
	DWORD uPrevDocOffset = 0;
	DWORD uPrevHitOffset = 0;

	RtWord_t tWord;
	tWord.m_bHasHitlist = false;

	// keyword buffers: byte 0 holds the length, bytes 1.. the zero-terminated word
	BYTE sWord[SPH_MAX_KEYWORD_LEN+2], sLastWord[SPH_MAX_KEYWORD_LEN+2];
	memset ( sWord, 0, sizeof(sWord) );
	memset ( sLastWord, 0, sizeof(sLastWord) );

	auto szWord = (const char*) ( sWord + 1 );

	int iLastWordLen = 0, iWordLen = 0;
	while ( pCurWord && pCurWord<pMaxWord )
	{
		// every m_iWordsCheckpoint words the delta chains restart
		bool bCheckpoint = ++nCheckpointWords==m_iWordsCheckpoint;
		if ( bCheckpoint )
		{
			nCheckpointWords = 1;
			iCheckpointOffset = int ( pCurWord - tSegment.m_dWords.Begin() );
			tWord.m_uDoc = 0;
			if ( !m_bKeywordDict )
				tWord.m_uWordID = 0;
		}

		const BYTE * pIn = pCurWord;
		if ( m_bKeywordDict )
		{
			// unpack prefix-compressed keyword: iMatch bytes shared with the
			// previous word, iDelta new bytes appended
			BYTE iMatch, iDelta, uPacked;
			uPacked = *pIn++;

			if ( pIn>=pMaxWord )
			{
				tReporter.Fail ( "reading past wordlist end (segment=%d, word=%d)", iSegment, nWordsRead );
				break;
			}

			if ( uPacked & 0x80 )
			{
				// one-byte packed form: 3-bit delta, 4-bit match
				iDelta = ( ( uPacked>>4 ) & 7 ) + 1;
				iMatch = uPacked & 15;
			} else
			{
				// two-byte form: 7-bit delta, full match byte
				iDelta = uPacked & 127;
				iMatch = *pIn++;

				if ( pIn>=pMaxWord )
				{
					tReporter.Fail ( "reading past wordlist end (segment=%d, word=%d)", iSegment, nWordsRead );
					break;
				}

				// values this small should have been stored in the packed form
				if ( iDelta<=8 && iMatch<=15 )
				{
					sLastWord[sizeof(sLastWord)-1] = '\0';
					tReporter.Fail ( "wrong word-delta (segment=%d, word=%d, last_word=%s, last_len=%d, match=%d, delta=%d)",
						iSegment, nWordsRead, sLastWord+1, iLastWordLen, iMatch, iDelta );
				}
			}

			if ( iMatch+iDelta>=(int)sizeof(sWord)-2 || iMatch>iLastWordLen )
			{
				// broken delta: report, skip the payload bytes, keep the old word buffer
				sLastWord[sizeof(sLastWord)-1] = '\0';
				tReporter.Fail ( "wrong word-delta (segment=%d, word=%d, last_word=%s, last_len=%d, match=%d, delta=%d)",
					iSegment, nWordsRead, sLastWord+1, iLastWordLen, iMatch, iDelta );

				pIn += iDelta;
				if ( pIn>=pMaxWord )
				{
					tReporter.Fail ( "reading past wordlist end (segment=%d, word=%d)", iSegment, nWordsRead );
					break;
				}
			} else
			{
				// reconstruct the full keyword from prefix + delta bytes
				iWordLen = iMatch+iDelta;
				sWord[0] = (BYTE)iWordLen;
				memcpy ( sWord+1+iMatch, pIn, iDelta );
				sWord[1+iWordLen] = 0;
				pIn += iDelta;
				if ( pIn>=pMaxWord )
				{
					tReporter.Fail ( "reading past wordlist end (segment=%d, word=%d)", iSegment, nWordsRead );
					break;
				}
			}

			// stored length must match the actual zero-terminated length
			auto iCalcWordLen = (int) strlen ( (const char *)sWord+1 );
			if ( iWordLen!=iCalcWordLen )
			{
				sWord[sizeof(sWord)-1] = '\0';
				tReporter.Fail ( "word length mismatch (segment=%d, word=%d, read_word=%s, read_len=%d, calc_len=%d)", iSegment, nWordsRead, sWord+1, iWordLen, iCalcWordLen );
			}

			if ( !iWordLen )
				tReporter.Fail ( "empty word in word list (segment=%d, word=%d)", iSegment, nWordsRead );

			// no embedded zero bytes allowed inside a keyword
			const BYTE * pStr = sWord+1;
			const BYTE * pStringStart = pStr;
			while ( pStringStart-pStr < iWordLen )
			{
				if ( !*pStringStart )
				{
					CSphString sErrorStr;
					sErrorStr.SetBinary ( (const char*)pStr, iWordLen );
					tReporter.Fail ( "embedded zero in a word list string (segment=%d, offset=%u, string=%s)", iSegment, (DWORD)(pStringStart-pStr), sErrorStr.cstr() );
				}

				pStringStart++;
			}

			// keywords must be stored in strictly increasing order
			if ( iLastWordLen && iWordLen )
			{
				if ( sphDictCmpStrictly ( (const char *)sWord+1, iWordLen, (const char *)sLastWord+1, iLastWordLen )<=0 )
				{
					sWord[sizeof(sWord)-1] = '\0';
					sLastWord[sizeof(sLastWord)-1] = '\0';
					tReporter.Fail ( "word order decreased (segment=%d, word=%d, read_word=%s, last_word=%s)", iSegment, nWordsRead, sWord+1, sLastWord+1 );
				}
			}

			memcpy ( sLastWord, sWord, iWordLen+2 );
			iLastWordLen = iWordLen;
		} else
		{
			// crc dictionary: word ids are delta-encoded and must increase
			tWord.m_uWordID += UnzipWordid ( pIn );
			if ( pIn>=pMaxWord )
				tReporter.Fail ( "reading past wordlist end (segment=%d, word=%d)", iSegment, nWordsRead );

			if ( tWord.m_uWordID<=uPrevWordID )
			{
				tReporter.Fail ( "wordid decreased (segment=%d, word=%d, wordid=" UINT64_FMT ", previd=" UINT64_FMT ")", iSegment, nWordsRead, (uint64_t)tWord.m_uWordID, (uint64_t)uPrevWordID );
			}

			uPrevWordID = tWord.m_uWordID;
		}

		// per-word counters and the doclist offset delta
		UnzipDword ( &tWord.m_uDocs, pIn );
		if ( pIn>=pMaxWord )
		{
			sWord[sizeof(sWord)-1] = '\0';
			tReporter.Fail ( "invalid docs/hits (segment=%d, word=%d, read_word=%s, docs=%u, hits=%u)", iSegment, nWordsRead, sWord+1, tWord.m_uDocs, tWord.m_uHits );
		}

		UnzipDword ( &tWord.m_uHits, pIn );
		if ( pIn>=pMaxWord )
			tReporter.Fail ( "reading past wordlist end (segment=%d, word=%d)", iSegment, nWordsRead );

		tWord.m_uDoc += UnzipDword ( pIn );
		if ( pIn>pMaxWord )
			tReporter.Fail ( "reading past wordlist end (segment=%d, word=%d)", iSegment, nWordsRead );

		pCurWord = pIn;

		// every word must have docs and at least as many hits as docs
		if ( !tWord.m_uDocs || !tWord.m_uHits || tWord.m_uHits<tWord.m_uDocs )
		{
			sWord[sizeof(sWord)-1] = '\0';
			tReporter.Fail ( "invalid docs/hits (segment=%d, word=%d, read_wordid=" UINT64_FMT ", read_word=%s, docs=%u, hits=%u)",
				iSegment, nWordsRead, (uint64_t)tWord.m_uWordID, szWord, tWord.m_uDocs, tWord.m_uHits );
		}

		// remember the recomputed checkpoint for the final comparison
		if ( bCheckpoint )
		{
			RtWordCheckpoint_t & tCP = dRefCheckpoints.Add();
			tCP.m_iOffset = iCheckpointOffset;

			if ( m_bKeywordDict )
			{
				tCP.m_szWord = new char [sWord[0]+1];
				memcpy ( (void *)tCP.m_szWord, sWord+1, sWord[0]+1 );
			} else
				tCP.m_uWordID = tWord.m_uWordID;
		}

		sWord[sizeof(sWord)-1] = '\0';

		if ( uPrevDocOffset && tWord.m_uDoc<=uPrevDocOffset )
			tReporter.Fail ( "doclist offset decreased (segment=%d, word=%d, read_wordid=" UINT64_FMT ", read_word=%s, doclist_offset=%u, prev_doclist_offset=%u)",
				iSegment, nWordsRead, (uint64_t)tWord.m_uWordID, szWord, tWord.m_uDoc, uPrevDocOffset );

		// read doclist
		auto uDocOffset = DWORD ( pCurDoc-tSegment.m_dDocs.Begin() );
		if ( tWord.m_uDoc!=uDocOffset )
		{
			// offset mismatch: report, then try to re-seek to the stored offset
			tReporter.Fail ( "unexpected doclist offset (wordid=" UINT64_FMT "(%s)(%d), doclist_offset=%u, expected_offset=%u)",
				(uint64_t)tWord.m_uWordID, szWord, nWordsRead, tWord.m_uDoc, uDocOffset );

			if ( uDocOffset>=(DWORD)tSegment.m_dDocs.GetLength() )
			{
				tReporter.Fail ( "doclist offset pointing past doclist (segment=%d, word=%d, read_word=%s, doclist_offset=%u, doclist_size=%d)",
					iSegment, nWordsRead, szWord, uDocOffset, tSegment.m_dDocs.GetLength() );

				nWordsRead++;
				continue;
			} else
				pCurDoc = tSegment.m_dDocs.Begin()+uDocOffset;
		}

		// read all docs from doclist
		RtDoc_t tDoc;
		RowID_t tPrevRowID = INVALID_ROWID;

		for ( DWORD uDoc=0; uDoc<tWord.m_uDocs && pCurDoc<pMaxDoc; uDoc++ )
		{
			bool bEmbeddedHit = false;
			pIn = pCurDoc;

			// rowid delta, field mask, hit count
			tDoc.m_tRowID += UnzipDword ( pIn );
			if ( pIn>=pMaxDoc )
			{
				tReporter.Fail ( "reading past doclist end (segment=%d, word=%d, read_wordid=" UINT64_FMT ", read_word=%s, doclist_offset=%u, doclist_size=%d)",
					iSegment, nWordsRead, (uint64_t)tWord.m_uWordID, szWord, uDocOffset, tSegment.m_dDocs.GetLength() );
				break;
			}

			UnzipDword ( &tDoc.m_uDocFields, pIn );
			if ( pIn>=pMaxDoc )
			{
				tReporter.Fail ( "reading past doclist end (segment=%d, word=%d, read_wordid=" UINT64_FMT ", read_word=%s, doclist_offset=%u, doclist_size=%d)",
					iSegment, nWordsRead, (uint64_t)tWord.m_uWordID, szWord, uDocOffset, tSegment.m_dDocs.GetLength() );
				break;
			}

			UnzipDword ( &tDoc.m_uHits, pIn );
			if ( pIn>=pMaxDoc )
			{
				tReporter.Fail ( "reading past doclist end (segment=%d, word=%d, read_wordid=" UINT64_FMT ", read_word=%s, doclist_offset=%u, doclist_size=%d)",
					iSegment, nWordsRead, (uint64_t)tWord.m_uWordID, szWord, uDocOffset, tSegment.m_dDocs.GetLength() );
				break;
			}

			if ( tDoc.m_uHits==1 )
			{
				// single-hit doc: the hit itself (field, pos) is embedded in the doclist
				bEmbeddedHit = true;

				auto a = UnzipDword ( pIn );
				if ( pIn>=pMaxDoc )
				{
					tReporter.Fail ( "reading past doclist end (segment=%d, word=%d, read_wordid=" UINT64_FMT ", read_word=%s, doclist_offset=%u, doclist_size=%d)",
						iSegment, nWordsRead, (uint64_t)tWord.m_uWordID, szWord, uDocOffset, tSegment.m_dDocs.GetLength() );
					break;
				}

				auto b = UnzipDword ( pIn );
				if ( pIn>pMaxDoc )
				{
					tReporter.Fail ( "reading past doclist end (segment=%d, word=%d, read_wordid=" UINT64_FMT ", read_word=%s, doclist_offset=%u, doclist_size=%d)",
						iSegment, nWordsRead, (uint64_t)tWord.m_uWordID, szWord, uDocOffset, tSegment.m_dDocs.GetLength() );
					break;
				}

				tDoc.m_uHit = HITMAN::Create ( b, a );
			} else
			{
				// multi-hit doc: m_uHit is an offset into the hitlist stream
				UnzipDword ( &tDoc.m_uHit, pIn );
				if ( pIn>pMaxDoc )
				{
					tReporter.Fail ( "reading past doclist end (segment=%d, word=%d, read_wordid=" UINT64_FMT ", read_word=%s, doclist_offset=%u, doclist_size=%d)",
						iSegment, nWordsRead, (uint64_t)tWord.m_uWordID, szWord, uDocOffset, tSegment.m_dDocs.GetLength() );
					break;
				}
			}

			pCurDoc = pIn;

			// rowids within one doclist must strictly increase and stay in range
			if ( uDoc && tDoc.m_tRowID<=tPrevRowID )
			{
				tReporter.Fail ( "rowid decreased (segment=%d, word=%d, read_wordid=" UINT64_FMT ", read_word=%s, rowid=%u, prev_rowid=%u)",
					iSegment, nWordsRead, (uint64_t)tWord.m_uWordID, szWord, tDoc.m_tRowID, tPrevRowID );
			}

			if ( tDoc.m_tRowID>=tSegment.m_uRows )
				tReporter.Fail ( "invalid rowid (segment=%d, word=%d, wordid=" UINT64_FMT "(%s), rowid=%u(%u))", iSegment, nWordsRead, tWord.m_uWordID, szWord, tDoc.m_tRowID, tSegment.m_uRows );

			if ( bEmbeddedHit )
			{
				// an embedded hit implies exactly one hit in exactly one field
				DWORD uFieldId = HITMAN::GetField ( tDoc.m_uHit );
				DWORD uFieldMask = tDoc.m_uDocFields;
				int iCounter = 0;
				for ( ; uFieldMask; iCounter++ )
					uFieldMask &= uFieldMask - 1;

				if ( iCounter!=1 || tDoc.m_uHits!=1 )
				{
					tReporter.Fail ( "embedded hit with multiple occurences in a document found (segment=%d, word=%d, wordid=" UINT64_FMT "(%s), rowid=%u)",
						iSegment, nWordsRead, (uint64_t)tWord.m_uWordID, szWord, tDoc.m_tRowID );
				}

				if ( (int)uFieldId>m_tSchema.GetFieldsCount() || uFieldId>SPH_MAX_FIELDS )
				{
					tReporter.Fail ( "invalid field id in an embedded hit (segment=%d, word=%d, wordid=" UINT64_FMT "(%s), rowid=%u, field_id=%u, total_fields=%d)",
						iSegment, nWordsRead, (uint64_t)tWord.m_uWordID, szWord, tDoc.m_tRowID, uFieldId, m_tSchema.GetFieldsCount() );
				}

				if ( !( tDoc.m_uDocFields & ( 1 << uFieldId ) ) )
				{
					tReporter.Fail ( "invalid field id: not in doclist mask (segment=%d, word=%d, wordid=" UINT64_FMT "(%s), rowid=%u, field_id=%u, field_mask=%u)",
						iSegment, nWordsRead, (uint64_t)tWord.m_uWordID, szWord, tDoc.m_tRowID, uFieldId, tDoc.m_uDocFields );
				}
			} else
			{
				// walk the external hitlist for this doc
				auto uExpectedHitOffset = DWORD ( pCurHit-tSegment.m_dHits.Begin() );
				if ( tDoc.m_uHit!=uExpectedHitOffset )
				{
					tReporter.Fail ( "unexpected hitlist offset (segment=%d, word=%d, wordid=" UINT64_FMT "(%s), rowid=%u, offset=%u, expected_offset=%u",
						iSegment, nWordsRead, (uint64_t)tWord.m_uWordID, szWord, tDoc.m_tRowID, tDoc.m_uHit, uExpectedHitOffset );
				}

				if ( tDoc.m_uHit && tDoc.m_uHit<=uPrevHitOffset )
				{
					tReporter.Fail ( "hitlist offset decreased (segment=%d, word=%d, wordid=" UINT64_FMT "(%s), rowid=%u, offset=%u, prev_offset=%u",
						iSegment, nWordsRead, (uint64_t)tWord.m_uWordID, szWord, tDoc.m_tRowID, tDoc.m_uHit, uPrevHitOffset );
				}

				// check hitlist
				DWORD uHitlistEntry = 0;
				DWORD uLastPosInField = 0;
				DWORD uLastFieldId = 0;
				bool bLastInFieldFound = false;

				for ( DWORD uHit = 0; uHit < tDoc.m_uHits && pCurHit; uHit++ )
				{
					uHitlistEntry += UnzipDword ( pCurHit );
					if ( pCurHit>pMaxHit )
					{
						tReporter.Fail ( "reading past hitlist end (segment=%d, word=%d, wordid=" UINT64_FMT "(%s), rowid=%u)", iSegment, nWordsRead, (uint64_t)tWord.m_uWordID, szWord, tDoc.m_tRowID );
						break;
					}

					DWORD uPosInField = HITMAN::GetPos ( uHitlistEntry );
					bool bLastInField = HITMAN::IsEnd ( uHitlistEntry );
					DWORD uFieldId = HITMAN::GetField ( uHitlistEntry );

					if ( (int)uFieldId>m_tSchema.GetFieldsCount() || uFieldId>SPH_MAX_FIELDS )
					{
						tReporter.Fail ( "invalid field id in a hitlist (segment=%d, word=%d, wordid=" UINT64_FMT "(%s), rowid=%u, field_id=%u, total_fields=%d)",
							iSegment, nWordsRead, (uint64_t)tWord.m_uWordID, szWord, tDoc.m_tRowID, uFieldId, m_tSchema.GetFieldsCount() );
					}

					if ( !( tDoc.m_uDocFields & ( 1 << uFieldId ) ) )
					{
						tReporter.Fail ( "invalid field id: not in doclist mask (segment=%d, word=%d, wordid=" UINT64_FMT "(%s), rowid=%u, field_id=%u, field_mask=%u)",
							iSegment, nWordsRead, (uint64_t)tWord.m_uWordID, szWord, tDoc.m_tRowID, uFieldId, tDoc.m_uDocFields );
					}

					// per-field position tracking resets when the field changes
					if ( uLastFieldId!=uFieldId )
					{
						bLastInFieldFound = false;
						uLastPosInField = 0;
					}

					if ( uLastPosInField && uPosInField<=uLastPosInField )
					{
						tReporter.Fail ( "hit position in field decreased (segment=%d, word=%d, wordid=" UINT64_FMT "(%s), rowid=%u, pos=%u, last_pos=%u)",
							iSegment, nWordsRead, (uint64_t)tWord.m_uWordID, szWord, tDoc.m_tRowID, uPosInField, uLastPosInField );
					}

					if ( bLastInField && bLastInFieldFound )
						tReporter.Fail ( "duplicate last-in-field hit found (segment=%d, word=%d, wordid=" UINT64_FMT "(%s), rowid=%u)", iSegment, nWordsRead, (uint64_t)tWord.m_uWordID, szWord, tDoc.m_tRowID );

					uLastPosInField = uPosInField;
					uLastFieldId = uFieldId;
					bLastInFieldFound |= bLastInField;
				}

				uPrevHitOffset = tDoc.m_uHit;
			}

			// field mask must not reference fields beyond the schema
			DWORD uAvailFieldMask = ( 1 << m_tSchema.GetFieldsCount() ) - 1;
			if ( tDoc.m_uDocFields & ~uAvailFieldMask )
			{
				tReporter.Fail ( "wrong document field mask (segment=%d, word=%d, wordid=" UINT64_FMT "(%s), rowid=%u, mask=%u, total_fields=%d",
					iSegment, nWordsRead, (uint64_t)tWord.m_uWordID, szWord, tDoc.m_tRowID, tDoc.m_uDocFields, m_tSchema.GetFieldsCount() );
			}

			tPrevRowID = tDoc.m_tRowID;
		}

		uPrevDocOffset = tWord.m_uDoc;
		nWordsRead++;
	}

	// every doclist/hitlist byte must have been consumed by the walk above
	if ( pCurDoc!=pMaxDoc )
		tReporter.Fail ( "unused doclist entries found (segment=%d, doclist_size=%d)", iSegment, tSegment.m_dDocs.GetLength() );

	if ( pCurHit!=pMaxHit )
		tReporter.Fail ( "unused hitlist entries found (segment=%d, hitlist_size=%d)", iSegment, tSegment.m_dHits.GetLength() );

	// compare the recomputed checkpoints against the stored ones
	if ( dRefCheckpoints.GetLength()!=tSegment.m_dWordCheckpoints.GetLength() )
		tReporter.Fail ( "word checkpoint count mismatch (read=%d, calc=%d)", tSegment.m_dWordCheckpoints.GetLength(), dRefCheckpoints.GetLength() );

	for ( int i=0; i < Min ( dRefCheckpoints.GetLength(), tSegment.m_dWordCheckpoints.GetLength() ); i++ )
	{
		const RtWordCheckpoint_t & tRefCP = dRefCheckpoints[i];
		const RtWordCheckpoint_t & tCP = tSegment.m_dWordCheckpoints[i];
		const int iLen = m_bKeywordDict ? (const int) strlen ( tCP.m_szWord ) : 0;
		if ( m_bKeywordDict && ( !tCP.m_szWord || ( !strlen ( tRefCP.m_szWord ) || !strlen ( tCP.m_szWord ) ) ) )
		{
			tReporter.Fail ( "empty word checkpoint %d ((segment=%d, read_word=%s, read_len=%u, readpos=%d, calc_word=%s, calc_len=%u, calcpos=%d)",
				i, iSegment, tCP.m_szWord, (DWORD)strlen ( tCP.m_szWord ), tCP.m_iOffset,
				tRefCP.m_szWord, (DWORD)strlen ( tRefCP.m_szWord ), tRefCP.m_iOffset );
		} else if ( sphCheckpointCmpStrictly ( tCP.m_szWord, iLen, tCP.m_uWordID, m_bKeywordDict, tRefCP ) || tRefCP.m_iOffset!=tCP.m_iOffset )
		{
			if ( m_bKeywordDict )
			{
				tReporter.Fail ( "word checkpoint %d differs (segment=%d, read_word=%s, readpos=%d, calc_word=%s, calcpos=%d)",
					i, iSegment, tCP.m_szWord, tCP.m_iOffset, tRefCP.m_szWord, tRefCP.m_iOffset );
			} else
			{
				tReporter.Fail ( "word checkpoint %d differs (segment=%d, readid=" UINT64_FMT ", readpos=%d, calcid=" UINT64_FMT ", calcpos=%d)",
					i, iSegment, (uint64_t)tCP.m_uWordID, tCP.m_iOffset, (int64_t)tRefCP.m_uWordID, tRefCP.m_iOffset );
			}
		}
	}

	// free the keyword copies allocated for the reference checkpoints
	if ( m_bKeywordDict )
		ARRAY_FOREACH ( i, dRefCheckpoints )
			SafeDeleteArray ( dRefCheckpoints[i].m_szWord );

	dRefCheckpoints.Reset ();

	// attribute rows, blobs, dead-row map and alive counter cross-checks
	MemoryDebugCheckReader_c tAttrs ( (const BYTE *)tSegment.m_dRows.begin(), (const BYTE *)tSegment.m_dRows.end() );
	MemoryDebugCheckReader_c tBlobs ( tSegment.m_dBlobs.begin(), tSegment.m_dBlobs.end() );
	DebugCheck_Attributes ( tAttrs, tBlobs, tSegment.m_uRows, 0, m_tSchema, tReporter );
	DebugCheck_DeadRowMap ( tSegment.m_tDeadRowMap.GetLengthBytes(), tSegment.m_uRows, tReporter );

	DWORD uCalcAliveRows = tSegment.m_tDeadRowMap.GetNumAlive();
	if ( tSegment.m_tAliveRows.load(std::memory_order_relaxed)!=uCalcAliveRows )
		tReporter.Fail ( "alive row count mismatch (segment=%d, expected=%u, current=%u)", iSegment, uCalcAliveRows,
			tSegment.m_tAliveRows.load ( std::memory_order_relaxed ) );
} // NOLINT function length
// Run the per-segment check over every RAM segment; a null segment slot is
// itself reported as a failure.
void RtIndex_c::DebugCheckRam ( DebugCheckError_i & tReporter ) NO_THREAD_SAFETY_ANALYSIS
{
	auto pRamSegs = m_tRtChunks.RamSegs();
	auto& dRamSegs = *pRamSegs;
	int iTotal = dRamSegs.GetLength();
	for ( int iSegment = 0; iSegment < iTotal; ++iSegment )
	{
		tReporter.Msg ( "checking RT segment %d(%d)...", iSegment, iTotal );

		const RtSegment_t * pSegment = dRamSegs[iSegment];
		if ( !pSegment )
		{
			tReporter.Fail ( "missing RT segment (segment=%d)", iSegment );
			continue;
		}

		DebugCheckRamSegment ( *pSegment, iSegment, tReporter );
	}
}
// cap on the number of failure messages DebugCheckInternal will record
constexpr int FAILS_THRESH = 100;

// In-memory failure collector used by CheckSegmentConsistency(): counts every
// failure but buffers at most FAILS_THRESH formatted messages; progress and
// informational output are discarded.
class DebugCheckInternal : public DebugCheckError_i
{
	int64_t m_iFails { 0 };			// total failures seen (not capped)
	StringBuilder_c m_sMsg { "\n" };	// newline-joined failure messages

public:
	bool Fail ( const char* szFmt, ... ) override;
	void Msg ( const char* szFmt, ... ) override {};
	void Progress ( const char* szFmt, ... ) override {};
	void Done() override {};
	int64_t GetNumFails() const override;
	// accumulated failure messages, newline-separated
	inline const char* cstr() const { return m_sMsg.cstr(); }
};
// Count every failure; format and buffer the message only while below the
// FAILS_THRESH cap. Always returns false (the reporter-side "failed" verdict).
bool DebugCheckInternal::Fail ( const char* szFmt, ... )
{
	const bool bRecord = ( ++m_iFails < FAILS_THRESH );
	if ( bRecord )
	{
		va_list ap;
		va_start ( ap, szFmt );
		m_sMsg.vSprintf ( szFmt, ap );
		va_end ( ap );
	}
	return false;
}
// Total failures registered, including those whose messages were dropped
// after the FAILS_THRESH cap was reached.
int64_t DebugCheckInternal::GetNumFails() const
{
	return m_iFails;
}
// Build a unique base path for damage-dump files: binlog directory, table
// name, daemon pid, and the remaining dump quota as a discriminator.
CSphString RtIndex_c::MakeDamagedName () const
{
	CSphString sName;
	sName.SetSprintf ( "%s/damaged.%s.%d.%d", Binlog::GetPath().cstr(), GetName(), getpid(), m_iTrackFailedRamActions );
	return sName;
}
// Write the given segments to sFile using the same layout SaveRamChunk()
// produces (reserved dword, segment count, segments, field lengths), so the
// dump can be inspected with the regular loading tools. No-op on empty input.
void RtIndex_c::DumpSegments ( VecTraits_T<const RtSegment_t*> dSegments, const CSphString& sFile ) const
{
	if ( dSegments.IsEmpty() )
		return;

	CSphString sError;
	CSphWriter tWriter;
	if ( !tWriter.OpenFile ( sFile, sError ) )
	{
		sphWarning ( "Unable to open %s, error %s", sFile.cstr(), sError.cstr() );
		return;
	}

	tWriter.PutDword ( 0 );
	tWriter.PutDword ( dSegments.GetLength() );

	// serialize each segment under its own shared lock
	for ( const RtSegment_t* pSegment : dSegments )
	{
		SccRL_t tLock ( pSegment->m_tLock );
		SaveRamSegment ( pSegment, tWriter );
	}

	SaveRamFieldLengths ( tWriter );
	tWriter.CloseFile();
}
void RtIndex_c::DumpSegment ( const RtSegment_t* pSeg, const CSphString& sFile ) const
{
assert ( pSeg );
LazyVector_T<const RtSegment_t*> dSegments;
dSegments.Add ( pSeg );
DumpSegments ( dSegments, sFile );
}
void RtIndex_c::DumpMeta ( const CSphString& sFile ) const
{
CSphString sLastError;
CSphWriter wrMeta;
if ( !wrMeta.OpenFile ( sFile, sLastError ) )
{
sphWarning ( "Unable to open %s, error %s", sFile.cstr(), sLastError.cstr() );
return;
}
WriteMeta ( m_iTID, { nullptr, 0 }, wrMeta );
wrMeta.CloseFile();
// write new meta
if ( wrMeta.IsError() )
sphWarning ( "%s", sLastError.cstr() );
}
void RtIndex_c::DumpInsert ( const RtSegment_t* pNewSeg ) const
{
if ( !pNewSeg || m_iTrackFailedRamActions <= 0 )
return;
auto tDescription = myinfo::UnsafeDescription();
if ( tDescription.second > 6 && !memcmp ( tDescription.first, "SYSTEM ", 7 ) )
return;
CSphString sLastError;
CSphString sBase = MakeDamagedName ();
CSphString sFile;
sFile.SetSprintf ( "%s.stmt", sBase.cstr() );
CSphWriter wrContent;
if ( !wrContent.OpenFile ( sFile, sLastError ) )
{
sphWarning ( "Unable to open %s, error %s", sFile.cstr(), sLastError.cstr() );
return;
}
wrContent.PutBytes ( tDescription.first, tDescription.second );
wrContent.CloseFile();
// write new meta
if ( wrContent.IsError() )
sphWarning ( "%s", sLastError.cstr() );
sFile.SetSprintf ( "%s.ram", sBase.cstr() );
DumpSegment ( pNewSeg, sFile );
sFile.SetSprintf ( "%s.meta", sBase.cstr() );
DumpMeta( sFile );
sphWarning ( "Damaged Insert saved as %s, files .stmt, .ram and .meta", sBase.cstr() );
--m_iTrackFailedRamActions;
}
void RtIndex_c::DumpMerge ( const RtSegment_t* pA, const RtSegment_t* pB, const RtSegment_t* pNew ) const
{
if ( m_iTrackFailedRamActions <= 0 )
return;
LazyVector_T<const RtSegment_t*> dSegments;
if ( pA )
dSegments.Add ( pA );
if ( pB )
dSegments.Add ( pB );
CSphString sBase = MakeDamagedName();
CSphString sFile;
sFile.SetSprintf ( "%s.origin.ram", sBase.cstr() );
DumpSegments ( dSegments, sFile );
dSegments.Reset();
if ( pNew )
dSegments.Add ( pNew );
sFile.SetSprintf ( "%s.ram", sBase.cstr() );
DumpSegments ( dSegments, sFile );
sFile.SetSprintf ( "%s.meta", sBase.cstr() );
DumpMeta(sFile);
sphWarning ( "Damaged Merge saved as %s, files .origin.ram, .ram and .meta", sBase.cstr() );
--m_iTrackFailedRamActions;
}
// run a debug self-check over a RAM segment; returns true when the segment is
// consistent. The result is cached in m_bConsistent so a clean segment is only
// checked once (the flag is presumably mutable, as it is set via a const ptr).
bool RtIndex_c::CheckSegmentConsistency ( const RtSegment_t* pNewSeg, bool bSilent ) const
{
	assert ( pNewSeg );
	// checking disabled (quota spent) or segment already verified clean
	if ( m_iTrackFailedRamActions<=0 || pNewSeg->m_bConsistent )
		return true;

	DebugCheckInternal tChecker;
	DebugCheckRamSegment ( *pNewSeg, 0, tChecker );
	if ( !tChecker.GetNumFails() )
	{
		// cache the clean verdict to avoid re-checking this segment
		pNewSeg->m_bConsistent = true;
		return true;
	}
	if ( !bSilent )
	{
		sphWarning ( "CheckSegmentConsistency returned %d errors", (int)tChecker.GetNumFails() );
		sphWarning ( "%s", tChecker.cstr() );
	}
	return false;
}
// run DebugCheck over the disk chunks of this RT index; returns the total
// number of failures found. When m_iCheckChunk is set (!=-1), only that chunk
// is checked; otherwise all chunks are.
int RtIndex_c::DebugCheckDisk ( DebugCheckError_i & tReporter )
{
	CreateFilenameBuilder_fn fnCreateFilenameBuilder = GetIndexFilenameBuilder();
	std::unique_ptr<FilenameBuilder_i> pFilenameBuilder;
	if ( fnCreateFilenameBuilder )
		pFilenameBuilder = fnCreateFilenameBuilder ( GetName() );

	// narrow the chunk list down to the single requested chunk, if any
	VecTraits_T<int> dChunks = m_dChunkNames.Slice();
	if ( m_iCheckChunk!=-1 )
	{
		int iChunk = dChunks.GetFirst ( [&] ( int & v ) { return m_iCheckChunk==v; } );
		if ( iChunk==-1 )
		{
			tReporter.Fail ( "failed to find disk chunk %s.%d, disk chunks total %d", GetFilebase(), m_iCheckChunk, m_dChunkNames.GetLength() );
			return 1;
		}
		dChunks = m_dChunkNames.Slice ( iChunk, 1 );
	}

	int iFailsPlain = 0;
	StrVec_t dWarnings;
	ARRAY_FOREACH ( i, dChunks )
	{
		int iChunk = dChunks[i];
		CSphString sChunk = GetFilename ( iChunk );
		tReporter.Msg ( "checking disk chunk, extension %d, %d(%d)...", dChunks[i], i, m_dChunkNames.GetLength() );

		// prealloc the chunk and delegate the actual check to the plain index
		auto pIndex = PreallocDiskChunk ( sChunk.cstr(), iChunk, pFilenameBuilder.get(), dWarnings, m_sLastError );
		if ( pIndex )
		{
			iFailsPlain += pIndex->DebugCheck ( tReporter, pFilenameBuilder.get() );
		} else
		{
			// a chunk that fails to prealloc counts as one failure
			tReporter.Fail ( "%s", m_sLastError.cstr() );
			m_sLastError = "";
			iFailsPlain++;
		}
	}

	for ( const auto & i : dWarnings )
		tReporter.Msg ( "warning: %s", i.cstr() );

	return iFailsPlain;
}
// switch this index into debug-check mode: remember what exactly to check
// (duplicate ids, optionally one specific chunk) and prohibit saving while
// the check runs
void RtIndex_c::SetDebugCheck ( bool bCheckIdDups, int iCheckChunk )
{
	m_iCheckChunk = iCheckChunk;
	m_bCheckIdDups = bCheckIdDups;
	m_bDebugCheck = true;
	ProhibitSave();
}
//////////////////////////////////////////////////////////////////////////
// SEARCHING
//////////////////////////////////////////////////////////////////////////
// query-word adaptor over a single RAM segment: streams the word's doclist
// and hitlist, skipping rows that are marked dead in the segment's kill map
struct RtQword_t final : public ISphQword
{
public:
	RtQword_t () = default;

	const CSphMatch & GetNextDoc() final
	{
		while (true)
		{
			// doclist exhausted -> signal end of matches
			if ( !m_tDocReader.UnzipDoc() )
			{
				m_tMatch.m_tRowID = INVALID_ROWID;
				return m_tMatch;
			}

			// skip documents deleted after this segment was built
			if ( !m_pSeg->m_tDeadRowMap.IsSet ( m_tDocReader->m_tRowID ) )
				break;
		}

		const auto* pDoc = (const RtDoc_t*)m_tDocReader;
		m_tMatch.m_tRowID = pDoc->m_tRowID;
		m_dQwordFields.Assign32 ( pDoc->m_uDocFields );
		m_uMatchHits = pDoc->m_uHits;
		// pack hit count (high 32 bits) and hit data (low 32 bits) for SeekHitlist
		m_iHitlistPos = (uint64_t(pDoc->m_uHits)<<32) + pDoc->m_uHit;
		m_bAllFieldsKnown = false;
		return m_tMatch;
	}

	void SeekHitlist ( SphOffset_t uOff ) final
	{
		auto iHits = (int)(uOff>>32);
		if ( iHits==1 )
		{
			// a single hit is inlined right into the doclist entry;
			// stash it in m_uNextHit, no hitlist access needed
			m_uNextHit = DWORD(uOff);
		} else
		{
			// multiple hits: low 32 bits are an offset into the segment hitlist
			m_uNextHit = 0;
			m_tHitReader.Seek ( m_pHits + DWORD(uOff), iHits );
		}
	}

	Hitpos_t GetNextHit () final
	{
		// m_uNextHit sentinel protocol: 0 = stream from the hitlist reader,
		// 0xffffffffUL = inlined single hit already returned, anything else
		// is the inlined hit itself (returned once, then marked consumed)
		if ( !m_uNextHit )
			return Hitpos_t ( m_tHitReader.UnzipHit() );
		else if ( m_uNextHit==0xffffffffUL )
			return EMPTY_HIT;
		else
			return Hitpos_t ( std::exchange ( m_uNextHit, 0xffffffffUL ) );
	}

	bool SetupScan ( const RtIndex_c * pIndex, int iSegment, const RtGuard_t& tGuard ) final
	{
		return pIndex->RtQwordSetup ( this, iSegment, tGuard );
	}

	// attach this qword to a segment and position the doc reader on tWord
	void SetupReader ( const RtSegment_t * pSeg, const RtWord_t & tWord )
	{
		m_pSeg = pSeg;
		m_tDocReader.Init ( pSeg, tWord );
		m_pHits = pSeg->m_dHits.begin();
	}

private:
	RtDocReader_c		m_tDocReader;		// doclist decoder for the current word
	RtHitReader_c		m_tHitReader;		// hitlist decoder (multi-hit docs only)
	CSphMatch			m_tMatch;			// reused match storage returned by GetNextDoc
	DWORD				m_uNextHit {0};		// inlined-hit sentinel, see GetNextHit
	const BYTE*			m_pHits {nullptr};	// base of the segment's hitlist
	const RtSegment_t *	m_pSeg {nullptr};	// segment this qword currently reads
};
//////////////////////////////////////////////////////////////////////////
// payload for an expanded (wildcard) keyword over RAM segments: a flat list
// of doclist slices plus a per-segment index (offset+length) into that list
struct RtSubstringPayload_t : public ISphSubstringPayload
{
	RtSubstringPayload_t ( int iSegmentCount, int iDoclists )
		: m_dSegment2Doclists ( iSegmentCount )
		, m_dDoclist ( iDoclists )
	{}
	CSphFixedVector<Slice_t>	m_dSegment2Doclists;	// per-segment slice of m_dDoclist
	CSphFixedVector<Slice_t>	m_dDoclist;				// doclist offsets/lengths, all segments
};
// query word backed by a wildcard-expansion payload: instead of a single
// doclist it chains through all doclists collected for the expanded terms
// of the current segment, one after another
struct RtQwordPayload_t final : public ISphQword
{
public:
	explicit RtQwordPayload_t ( const RtSubstringPayload_t * pPayload )
		: m_pPayload ( pPayload )
	{
		m_tMatch.Reset ( 0 );
		// global stats come precomputed from the payload
		m_iDocs = m_pPayload->m_iTotalDocs;
		m_iHits = m_pPayload->m_iTotalHits;

		m_uDoclist = 0;
		m_uDoclistLeft = 0;
		m_pSegment = nullptr;
		m_uHitEmbeded = EMPTY_HIT;
	}

	const CSphMatch & GetNextDoc() final
	{
		m_iHits = 0;
		while ( true )
			// current doclist exhausted: advance to the next one, or finish
			if ( !m_tDocReader.UnzipDoc() )
			{
				if ( !m_uDoclistLeft )
				{
					m_tMatch.m_tRowID = INVALID_ROWID;
					return m_tMatch;
				}
				SetupReader();
			} else if ( !m_pSegment->m_tDeadRowMap.IsSet ( m_tDocReader->m_tRowID ) )
				break; // found a live document

		const auto* pDoc = (const RtDoc_t*) m_tDocReader;
		m_tMatch.m_tRowID = pDoc->m_tRowID;
		m_dQwordFields.Assign32 ( pDoc->m_uDocFields );
		m_bAllFieldsKnown = false;

		// per-doc hit state: count, the possibly-inlined single hit, and a
		// hitlist reader positioned for the multi-hit case
		m_iHits = pDoc->m_uHits;
		m_uHitEmbeded = pDoc->m_uHit;
		m_tHitReader.Seek ( *m_pSegment, *pDoc );

		return m_tMatch;
	}

	// hitlist position is managed per-document in GetNextDoc; nothing to seek
	void SeekHitlist ( SphOffset_t ) final
	{}

	Hitpos_t GetNextHit () final
	{
		// >1 hits stream from the reader; exactly 1 hit is inlined in the
		// doclist entry and returned once (then replaced with EMPTY_HIT)
		if ( m_iHits>1 )
			return Hitpos_t ( m_tHitReader.UnzipHit() );
		return ( m_iHits==1 ) ? Hitpos_t ( std::exchange ( m_uHitEmbeded, EMPTY_HIT ) ) : EMPTY_HIT;
	}

	bool SetupScan ( const RtIndex_c *, int iSegment, const RtGuard_t& tGuard ) final
	{
		m_tDocReader.Reset();

		// stats-only pass (or invalid segment): detach from any segment
		if ( iSegment<0 )
		{
			m_pSegment = nullptr;
			m_uDoclist = 0;
			m_uDoclistLeft = 0;
			return false;
		}

		// bind to the doclist slice the payload collected for this segment
		m_pSegment = tGuard.m_dRamSegs[iSegment];
		m_uDoclist = m_pPayload->m_dSegment2Doclists[iSegment].m_uOff;
		m_uDoclistLeft = m_pPayload->m_dSegment2Doclists[iSegment].m_uLen;

		if ( !m_uDoclistLeft )
			return false;

		SetupReader();
		return true;
	}

private:
	// position the doc reader on the next doclist of the current segment slice
	void SetupReader ()
	{
		assert ( m_uDoclistLeft );
		// fake up an RtWord_t carrying just the doclist offset and doc count
		RtWord_t tWord;
		tWord.m_uDoc = m_pPayload->m_dDoclist[m_uDoclist].m_uOff;
		tWord.m_uDocs = m_pPayload->m_dDoclist[m_uDoclist].m_uLen;
		m_tDocReader.Init ( m_pSegment, tWord );
		++m_uDoclist;
		--m_uDoclistLeft;
	}

	const RtSubstringPayload_t *	m_pPayload;		// expansion payload (not owned)
	CSphMatch						m_tMatch;		// reused match storage
	RtDocReader_c					m_tDocReader;	// current doclist decoder
	RtHitReader_c					m_tHitReader;	// current hitlist decoder

	const RtSegment_t *				m_pSegment;		// segment being scanned
	DWORD							m_uDoclist;		// next doclist index in payload
	DWORD							m_uDoclistLeft;	// doclists remaining in this segment
	DWORD							m_uHitEmbeded;	// inlined single hit of current doc
};
// fullscan pseudo-qword over a RAM segment: "matches" every row of the
// segment, consulting the segment's dead-row map for liveness
class RtScanQword_t final : public QwordScan_c
{
public:
	explicit RtScanQword_t ( int iRowsTotal )
		: QwordScan_c ( iRowsTotal )
	{
	}

	bool SetupScan ( const RtIndex_c * pIndex, int iSegment, const RtGuard_t& tGuard ) final
	{
		m_tDoc.Reset ( 0 );

		// stats-only pass; nothing to bind
		if ( iSegment<0 )
			return false;

		const auto& pSegment = tGuard.m_dRamSegs[iSegment];

		m_iRowsCount = pSegment->m_uRows;
		m_iDocs = m_iRowsCount;
		m_bDone = ( m_iRowsCount==0 );
		m_dQwordFields.SetAll();
		m_pKilled = &pSegment->m_tDeadRowMap;

		// scan is worthwhile only if the segment still has live rows
		return ( pSegment->m_tAliveRows.load(std::memory_order_relaxed)>0 );
	}

	bool IsAliveRow ( RowID_t tRowID ) const override
	{
		return !m_pKilled->IsSet ( tRowID );
	}

private:
	const DeadRowMap_Ram_c * m_pKilled { nullptr };	// segment kill map (not owned)
};
//////////////////////////////////////////////////////////////////////////
// qword factory/setup for RT searches: spawns plain or payload-backed qwords
// and binds them to the RAM segment selected via SetSegment (-1 = stats pass)
class RtQwordSetup_t final : public ISphQwordSetup
{
public:
	explicit RtQwordSetup_t ( const RtGuard_t& tGuard );
	ISphQword *	QwordSpawn ( const XQKeyword_t & ) const final;
	bool		QwordSetup ( ISphQword * pQword ) const final;
	void		SetSegment ( int iSegment )	{ m_iSeg = iSegment; }
	ISphQword *	ScanSpawn() const final;

private:
	const RtGuard_t& m_tGuard;	// keeps the segment set alive for the query
	int				m_iSeg;		// current segment index, -1 means stats-only
};
// start in stats-only mode (-1); callers switch segments via SetSegment
RtQwordSetup_t::RtQwordSetup_t ( const RtGuard_t& tGuard )
	: m_tGuard ( tGuard )
	, m_iSeg ( -1 )
{ }
// spawn the right qword flavor for a keyword: wildcard-expanded keywords
// carry a payload and get the payload-aware implementation, plain keywords
// get the regular per-segment reader
ISphQword * RtQwordSetup_t::QwordSpawn ( const XQKeyword_t & tWord ) const
{
	if ( tWord.m_pPayload )
		return new RtQwordPayload_t ( (const RtSubstringPayload_t *)tWord.m_pPayload );

	return new RtQword_t();
}
// bind the qword to the current segment (m_iSeg) of the RT index
bool RtQwordSetup_t::QwordSetup ( ISphQword * pQword ) const
{
	// there was two dynamic_casts here once but they're not necessary
	// maybe it's worth to rewrite class hierarchy to avoid c-casts here?
	const auto * pIndex = (const RtIndex_c *)m_pIndex;
	return pQword->SetupScan ( pIndex, m_iSeg, m_tGuard );
}
// early filter evaluation for a RAM-segment match; returns TRUE when the
// match must be REJECTED (filter present and failed), false to keep it
bool RtIndex_c::EarlyReject ( CSphQueryContext * pCtx, CSphMatch & tMatch ) const
{
	auto pSegment = (RtSegment_t*)const_cast<void*>(pCtx->m_pIndexData);
	// bind the static (docinfo) row so filter expressions can read attributes
	tMatch.m_pStatic = pSegment->GetDocinfoByRowID ( tMatch.m_tRowID );
	pCtx->CalcFilter ( tMatch );
	if ( !pCtx->m_pFilter )
		return false; // no filter -> nothing can reject

	if ( !pCtx->m_pFilter->Eval ( tMatch ) )
	{
		// reject: release filter-stage allocations made for this match
		pCtx->FreeDataFilter ( tMatch );
		return true;
	}
	return false;
}
// spawn a fullscan pseudo-qword sized by the index's total document count
ISphQword * RtQwordSetup_t::ScanSpawn () const
{
	auto iTotalDocs = (int)m_pIndex->GetStats().m_iTotalDocuments;
	return new RtScanQword_t ( iTotalDocs );
}
// WARNING, setup is pretty tricky
// for RT queries, we setup qwords several times
// first pass (with NULL segment arg) should sum all stats over all segments
// others passes (with non-NULL segments) should setup specific segment (including local stats)
//
// locates pQword's word in pCurSeg's dictionary; on success, accumulates the
// word's doc/hit stats into the qword and (when bSetup) attaches the segment
// doclist reader. Returns false when the word is absent from this segment.
bool RtIndex_c::RtQwordSetupSegment ( RtQword_t * pQword, const RtSegment_t * pCurSeg, bool bSetup ) const
{
	if ( !pCurSeg )
		return false;

	SphWordID_t uWordID = pQword->m_uWordID;
	const char * sWord = pQword->m_sDictWord.cstr();
	int iWordLen = pQword->m_sDictWord.Length();
	bool bPrefix = false;
	if ( m_bKeywordDict && iWordLen && sWord[iWordLen-1]=='*' ) // crc star search emulation
	{
		--iWordLen;
		bPrefix = true;
	}
	if ( !iWordLen )
		return false;

	// prevent prefix matching for explicitly setting prohibited by config, to be on pair with plain index (or CRC kind of index)
	if ( bPrefix && ( ( m_tSettings.GetMinPrefixLen ( m_bKeywordDict ) && iWordLen< m_tSettings.GetMinPrefixLen ( m_bKeywordDict ) )
		|| ( m_tSettings.m_iMinInfixLen && iWordLen< m_tSettings.m_iMinInfixLen ) ) )
		return false;

	// no checkpoints - check all words
	// no checkpoints matched - check only words prior to 1st checkpoint
	// checkpoint found - check words at that checkpoint
	RtWordReader_c tReader ( pCurSeg, m_bKeywordDict, m_iWordsCheckpoint, m_tSettings.m_eHitless );

	if ( pCurSeg->m_dWordCheckpoints.GetLength() )
	{
		const RtWordCheckpoint_t * pCp = m_bKeywordDict
			? sphSearchCheckpointWrd ( sWord, iWordLen, false, pCurSeg->m_dWordCheckpoints )
			: sphSearchCheckpointCrc ( uWordID, pCurSeg->m_dWordCheckpoints );

		const BYTE * pWords = pCurSeg->m_dWords.Begin();

		if ( !pCp )
		{
			// target sorts below the 1st checkpoint; scan only the leading chunk
			tReader.m_pMax = pWords + pCurSeg->m_dWordCheckpoints.Begin()->m_iOffset;
		} else
		{
			tReader.m_pCur = pWords + pCp->m_iOffset;
			// if next checkpoint exists setup reader range
			if ( ( pCp+1 )<= ( &pCurSeg->m_dWordCheckpoints.Last() ) )
				tReader.m_pMax = pWords + pCp[1].m_iOffset;
		}
	}

	// accumulate the found word's stats into the qword and, on a setup pass,
	// attach the segment readers (shared tail of both dictionary flavors)
	auto fnAcceptWord = [pQword, pCurSeg, bSetup] ( const RtWord_t * pWord )
	{
		pQword->m_iDocs += pWord->m_uDocs;
		pQword->m_iHits += pWord->m_uHits;
		pQword->m_bHasHitlist &= pWord->m_bHasHitlist;
		if ( bSetup )
			pQword->SetupReader ( pCurSeg, *pWord );
	};

	// find the word between checkpoints
	if ( m_bKeywordDict )
	{
		// keyword dict: advance while strictly less than the target
		int iCmp = 0;
		do
		{
			if ( !tReader.UnzipWord() )
				return false;
			iCmp = sphDictCmpStrictly ( (const char*)tReader->m_sWord + 1, tReader->m_sWord[0], sWord, iWordLen );
		} while ( iCmp<0 );

		if ( iCmp ) // overshot the target: no exact match in this segment
			return false;

		fnAcceptWord ( (const RtWord_t*)tReader );
		return true;
	}

	// CRC dict: advance by numeric word id
	assert ( !m_bKeywordDict );
	do
		if ( !tReader.UnzipWord() )
			return false;
	while ( tReader->m_uWordID < uWordID );

	if ( tReader->m_uWordID>uWordID )
		return false;

	fnAcceptWord ( (const RtWord_t*)tReader );
	return true;
}
// one expanded-dictionary term kept by name: hash + offset into a shared
// name buffer, plus its doc/hit stats (used when no payload is built)
struct RtExpandedEntry_t
{
	DWORD	m_uHash;	// CRC32 of the length-prefixed word, cheap pre-compare
	int		m_iNameOff;	// offset of the length-prefixed name in the shared buffer
	int		m_iDocs;
	int		m_iHits;
};
// one expanded term kept as a raw doclist reference (payload mode)
struct RtExpandedPayload_t
{
	int		m_iDocs;
	int		m_iHits;
	DWORD	m_uDoclistOff;	// offset of the term's doclist within its segment
};
struct RtExpandedTraits_fn
{
inline bool IsLess ( const RtExpandedEntry_t & a, const RtExpandedEntry_t & b ) const
{
assert ( m_sBase );
if ( a.m_uHash!=b.m_uHash )
{
return a.m_uHash<b.m_uHash;
} else
{
const BYTE * pA = m_sBase + a.m_iNameOff;
const BYTE * pB = m_sBase + b.m_iNameOff;
if ( pA[0]!=pB[0] )
return pA[0]<pB[0];
return ( sphDictCmp ( (const char *)pA+1, pA[0], (const char *)pB+1, pB[0] )<0 );
}
}
inline bool IsEqual ( const RtExpandedEntry_t * a, const RtExpandedEntry_t * b ) const
{
assert ( m_sBase );
if ( a->m_uHash!=b->m_uHash )
return false;
const BYTE * pA = m_sBase + a->m_iNameOff;
const BYTE * pB = m_sBase + b->m_iNameOff;
if ( pA[0]!=pB[0] )
return false;
return ( sphDictCmp ( (const char *)pA+1, pA[0], (const char *)pB+1, pB[0] )==0 );
}
explicit RtExpandedTraits_fn ( const BYTE * sBase )
: m_sBase ( sBase )
{ }
const BYTE * m_sBase;
};
// collector that turns wildcard-expanded RT dictionary terms into either a
// list of named expansions (with merged per-word stats) or a doclist payload
// (per-segment slices), honoring the expansion limit in both modes
struct DictEntryRtPayload_t : public DictTerm2Expanded_i
{
	DictEntryRtPayload_t ( bool bPayload, int iSegments )
	{
		m_bPayload = bPayload;
		m_iSegExpansionLimit = iSegments;
		if ( bPayload )
		{
			m_dWordPayload.Reserve ( 1000 );
			m_dSeg.Resize ( iSegments );
			ARRAY_FOREACH ( i, m_dSeg )
			{
				m_dSeg[i].m_uOff = 0;
				m_dSeg[i].m_uLen = 0;
			}
		}

		m_dWordExpand.Reserve ( 1000 );
		m_dWordBuf.Reserve ( 8096 );
	}

	// register one matched dictionary word from segment iSegment
	void Add ( const RtWord_t * pWord, int iSegment )
	{
		if ( !m_bPayload || !sphIsExpandedPayload ( pWord->m_uDocs, pWord->m_uHits ) )
		{
			// keep the word by name: copy its length-prefixed text into the
			// shared buffer and remember hash + stats for later merging
			RtExpandedEntry_t & tExpand = m_dWordExpand.Add();

			int iOff = m_dWordBuf.GetLength();
			int iWordLen = pWord->m_sWord[0] + 1;
			tExpand.m_uHash = sphCRC32 ( pWord->m_sWord, iWordLen );
			tExpand.m_iNameOff = iOff;
			tExpand.m_iDocs = pWord->m_uDocs;
			tExpand.m_iHits = pWord->m_uHits;
			m_dWordBuf.Append ( pWord->m_sWord, iWordLen );
		} else
		{
			// payload mode: keep only the doclist reference
			RtExpandedPayload_t & tExpand = m_dWordPayload.Add();
			tExpand.m_iDocs = pWord->m_uDocs;
			tExpand.m_iHits = pWord->m_uHits;
			tExpand.m_uDoclistOff = pWord->m_uDoc;

			// NOTE: m_uOff temporarily stores the END of the segment's slice;
			// it is rewound to the start in Convert()
			m_dSeg[iSegment].m_uOff = m_dWordPayload.GetLength();
			m_dSeg[iSegment].m_uLen++;
		}
	}

	// finalize: clip to the expansion limit, merge duplicate names across
	// segments, build the payload, and publish totals into tArgs
	void Convert ( ISphWordlist::Args_t & tArgs )
	{
		if ( !m_dWordExpand.GetLength() && !m_dWordPayload.GetLength() )
			return;

		int iTotalDocs = 0;
		int iTotalHits = 0;
		if ( !m_dWordExpand.IsEmpty() )
		{
			// limit scales with segment count since the same word may appear once per segment
			int iRtExpansionLimit = tArgs.m_iExpansionLimit * m_iSegExpansionLimit;
			if ( tArgs.m_iExpansionLimit && m_dWordExpand.GetLength()>iRtExpansionLimit )
			{
				// sort expansions by frequency desc
				// clip the less frequent ones if needed, as they are likely misspellings
				sphSort ( m_dWordExpand.Begin(), m_dWordExpand.GetLength(), ExpandedOrderDesc_T<RtExpandedEntry_t>() );
				m_dWordExpand.Resize ( iRtExpansionLimit );
			}

			// lets merge statistics for same words from different segments
			// as hash produce a lot tiny allocations here
			const BYTE * sBase = m_dWordBuf.Begin();
			RtExpandedTraits_fn fnCmp ( sBase );
			sphSort ( m_dWordExpand.Begin(), m_dWordExpand.GetLength(), fnCmp );

			// after the sort duplicates are adjacent; fold their stats into one entry
			const RtExpandedEntry_t * pLast = m_dWordExpand.Begin();
			tArgs.AddExpanded ( sBase+pLast->m_iNameOff+1, sBase[pLast->m_iNameOff], pLast->m_iDocs, pLast->m_iHits );
			iTotalDocs += pLast->m_iDocs;
			iTotalHits += pLast->m_iHits;
			for ( int i=1; i<m_dWordExpand.GetLength(); ++i )
			{
				const RtExpandedEntry_t * pCur = m_dWordExpand.Begin() + i;

				if ( fnCmp.IsEqual ( pLast, pCur ) )
				{
					tArgs.m_dExpanded.Last().m_iDocs += pCur->m_iDocs;
					tArgs.m_dExpanded.Last().m_iHits += pCur->m_iHits;
				} else
				{
					tArgs.AddExpanded ( sBase + pCur->m_iNameOff + 1, sBase[pCur->m_iNameOff],
						pCur->m_iDocs, pCur->m_iHits );
					pLast = pCur;
				}
				iTotalDocs += pCur->m_iDocs;
				iTotalHits += pCur->m_iHits;
			}
			tArgs.m_tExpansionStats.m_iTerms += m_dWordExpand.GetLength();
		}

		if ( m_dWordPayload.GetLength() )
		{
			DWORD uExpansionLimit = tArgs.m_iExpansionLimit;
			int iPayloads = 0;
			for ( auto& tSeg: m_dSeg )
			{
				// reverse per segment offset to payload doc-list as offset was the end instead of start
				assert ( tSeg.m_uOff>=tSeg.m_uLen );
				tSeg.m_uOff = tSeg.m_uOff - tSeg.m_uLen;

				// per segment expansion limit clip
				if ( uExpansionLimit && tSeg.m_uLen>uExpansionLimit )
				{
					// sort expansions by frequency desc
					// per segment clip the less frequent ones if needed, as they are likely misspellings
					sphSort ( m_dWordPayload.Begin()+tSeg.m_uOff, tSeg.m_uLen,
						ExpandedOrderDesc_T<RtExpandedPayload_t>() );
					tSeg.m_uLen = uExpansionLimit;
				}
				iPayloads += tSeg.m_uLen;
				// sort by ascending doc-list offset
				sphSort ( m_dWordPayload.Begin()+tSeg.m_uOff, tSeg.m_uLen,
					bind ( &RtExpandedPayload_t::m_uDoclistOff ) );
			}

			// build the flat payload: doclist slices grouped by segment
			std::unique_ptr<RtSubstringPayload_t> pPayload ( new RtSubstringPayload_t ( m_dSeg.GetLength(), iPayloads ) );

			Slice_t * pDst = pPayload->m_dDoclist.Begin();
			ARRAY_FOREACH ( i, m_dSeg )
			{
				const Slice_t & tSeg = m_dSeg[i];
				const RtExpandedPayload_t * pSrc = m_dWordPayload.Begin() + tSeg.m_uOff;
				const RtExpandedPayload_t * pEnd = pSrc + tSeg.m_uLen;
				pPayload->m_dSegment2Doclists[i].m_uOff = pPayload->m_dDoclist.Idx(pDst);
				pPayload->m_dSegment2Doclists[i].m_uLen = tSeg.m_uLen;
				while ( pSrc!=pEnd )
				{
					pDst->m_uOff = pSrc->m_uDoclistOff;
					pDst->m_uLen = pSrc->m_iDocs;
					iTotalDocs += pSrc->m_iDocs;
					iTotalHits += pSrc->m_iHits;
					++pDst;
					++pSrc;
				}
			}
			pPayload->m_iTotalDocs = iTotalDocs;
			pPayload->m_iTotalHits = iTotalHits;
			tArgs.m_pPayload = std::move ( pPayload );
			tArgs.m_tExpansionStats.m_iMerged += iPayloads;
		}
		tArgs.m_iTotalDocs = iTotalDocs;
		tArgs.m_iTotalHits = iTotalHits;
	}

	bool							m_bPayload;				// payload mode flag
	CSphVector<RtExpandedEntry_t>	m_dWordExpand;			// by-name expansions
	CSphVector<RtExpandedPayload_t>	m_dWordPayload;			// doclist references
	CSphVector<BYTE>				m_dWordBuf;				// shared name buffer
	CSphVector<Slice_t>				m_dSeg;					// per-segment slice of m_dWordPayload
	int								m_iSegExpansionLimit = 0;	// segment count, scales the limit
};
// collect all RAM-segment dictionary words matching the prefix sSubstring and
// the full wildcard pattern sWildcard, feeding them into a payload collector
void RtIndex_c::GetPrefixedWords ( const char * sSubstring, int iSubLen, const char * sWildcard, Args_t & tArgs ) const
{
	// precompute the wide-char form of the wildcard for UTF-8 matching
	int dWildcard [ SPH_MAX_WORD_LEN + 1 ];
	int * pWildcard = ( sphIsUTF8 ( sWildcard ) && sphUTF8ToWideChar ( sWildcard, dWildcard, SPH_MAX_WORD_LEN ) ) ? dWildcard : nullptr;

	const auto& dSegments = *(const RtSegVec_c*)tArgs.m_pIndexData.Ptr();
	DictEntryRtPayload_t tDict2Payload ( tArgs.m_bPayload, dSegments.GetLength() );
	const int iSkipMagic = ( BYTE(*sSubstring)<0x20 ); // whether to skip heading magic chars in the prefix, like NONSTEMMED maker
	ARRAY_FOREACH ( iSeg, dSegments )
	{
		const RtSegment_t * pCurSeg = dSegments[iSeg];
		RtWordReader_c tReader ( pCurSeg, true, m_iWordsCheckpoint, m_tSettings.m_eHitless );

		// find initial checkpoint or check words prior to 1st checkpoint
		if ( pCurSeg->m_dWordCheckpoints.GetLength() )
		{
			const RtWordCheckpoint_t * pCurCheckpoint = sphSearchCheckpointWrd( sSubstring, iSubLen, true, pCurSeg->m_dWordCheckpoints );

			if ( pCurCheckpoint )
			{
				// there could be valid data prior 1st checkpoint that should be unpacked and checked
				auto iCheckpointNameLen = (int) strlen ( pCurCheckpoint->m_szWord );
				if ( pCurCheckpoint!=pCurSeg->m_dWordCheckpoints.Begin()
					|| ( sphDictCmp ( sSubstring, iSubLen, pCurCheckpoint->m_szWord, iCheckpointNameLen )==0 && iSubLen==iCheckpointNameLen ) )
				{
					tReader.m_pCur = pCurSeg->m_dWords.Begin() + pCurCheckpoint->m_iOffset;
				}
			}
		}

		// find the word between checkpoints
		while ( tReader.UnzipWord() )
		{
			const auto* pWord = (const RtWord_t*)tReader;
			int iCmp = sphDictCmp ( sSubstring, iSubLen, (const char *)pWord->m_sWord+1, pWord->m_sWord[0] );
			if ( iCmp<0 )
				break;		// passed the prefix range, no more candidates
			else if ( iCmp==0 && iSubLen<=pWord->m_sWord[0] && sphWildcardMatch ( (const char *)pWord->m_sWord+1+iSkipMagic, sWildcard, pWildcard ) )
				tDict2Payload.Add ( pWord, iSeg );
			// FIXME!!! same case 'boxi*' matches 'box' document at plain index
			// but masked by a checkpoint search
		}
	}

	tDict2Payload.Convert ( tArgs );
}
// test the infix sInfix against a segment's per-checkpoint bloom filters and
// append the indices of candidate checkpoint regions to dCheckpoints
// (index 0 denotes the word region before the first checkpoint).
// Returns true if at least one candidate was added.
bool ExtractInfixCheckpoints ( const char * sInfix, int iBytes, int iMaxCodepointLength, int iDictCpCount, const CSphTightVector<uint64_t> & dFilter, CSphVector<DWORD> & dCheckpoints )
{
	if ( !dFilter.GetLength() )
		return false;

	int iStart = dCheckpoints.GetLength();

	// build the infix's own bloom signature (two n-gram sizes)
	uint64_t dVals[ BLOOM_PER_ENTRY_VALS_COUNT * BLOOM_HASHES_COUNT ];
	memset ( dVals, 0, sizeof(dVals) );

	BloomGenTraits_t tBloom0 ( dVals );
	BloomGenTraits_t tBloom1 ( dVals+BLOOM_PER_ENTRY_VALS_COUNT );
	if ( !BuildBloom ( (const BYTE *)sInfix, iBytes, BLOOM_NGRAM_0, ( iMaxCodepointLength>1 ), BLOOM_PER_ENTRY_VALS_COUNT, tBloom0 ) )
		return false;
	BuildBloom ( (const BYTE *)sInfix, iBytes, BLOOM_NGRAM_1, ( iMaxCodepointLength>1 ), BLOOM_PER_ENTRY_VALS_COUNT, tBloom1 );

	// iDictCpCount+1 entries: one per checkpoint plus the pre-checkpoint region
	for ( int iDictCp=0; iDictCp<iDictCpCount+1; iDictCp++ )
	{
		const uint64_t * pCP = dFilter.Begin() + iDictCp * BLOOM_PER_ENTRY_VALS_COUNT * BLOOM_HASHES_COUNT;
		const uint64_t * pSuffix = dVals;

		// candidate iff every bit of the infix signature is set in the filter
		bool bMatched = true;
		for ( int iElem=0; iElem<BLOOM_PER_ENTRY_VALS_COUNT*BLOOM_HASHES_COUNT; iElem++ )
		{
			uint64_t uFilter = *pCP++;
			uint64_t uSuffix = *pSuffix++;
			if ( ( uFilter & uSuffix )!=uSuffix )
			{
				bMatched = false;
				break;
			}
		}

		if ( bMatched )
			dCheckpoints.Add ( (DWORD)iDictCp );
	}

	return ( dCheckpoints.GetLength()!=iStart );
}
// collect all RAM-segment dictionary words containing the infix sSubstring
// that match the wildcard sWildcard, using bloom-filtered checkpoints to
// narrow down the regions that need to be scanned
void RtIndex_c::GetInfixedWords ( const char * sSubstring, int iSubLen, const char * sWildcard, Args_t & tArgs ) const
{
	// sanity checks
	if ( !sSubstring || iSubLen<=0 )
		return;

	// find those prefixes
	CSphVector<DWORD> dPoints;
	const int iSkipMagic = ( tArgs.m_bHasExactForms ? 1 : 0 ); // whether to skip heading magic chars in the prefix, like NONSTEMMED maker
	const auto& dSegments = *(const RtSegVec_c*)tArgs.m_pIndexData.Ptr();
	DictEntryRtPayload_t tDict2Payload ( tArgs.m_bPayload, dSegments.GetLength() );
	ARRAY_FOREACH ( iSeg, dSegments )
	{
		const RtSegment_t * pSeg = dSegments[iSeg];
		if ( !pSeg->m_dWords.GetLength() )
			continue;

		dPoints.Resize ( 0 );
		// bloom pre-filter: skip segments whose checkpoints can't contain the infix
		if ( !ExtractInfixCheckpoints ( sSubstring, iSubLen, m_iMaxCodepointLength, pSeg->m_dWordCheckpoints.GetLength(), pSeg->m_dInfixFilterCP, dPoints ) )
			continue;

		int dWildcard [ SPH_MAX_WORD_LEN + 1 ];
		int * pWildcard = ( sphIsUTF8 ( sWildcard ) && sphUTF8ToWideChar ( sWildcard, dWildcard, SPH_MAX_WORD_LEN ) ) ? dWildcard : nullptr;

		// walk those checkpoints, check all their words
		for ( DWORD uPoint : dPoints )
		{
			// checkpoint value N denotes the word region [cp[N-1], cp[N]);
			// N==0 is the region before the 1st checkpoint
			auto iNext = (int)uPoint;
			auto iCur = iNext-1;
			RtWordReader_c tReader ( pSeg, true, m_iWordsCheckpoint, m_tSettings.m_eHitless );
			if ( iCur>=0 )
				tReader.m_pCur = pSeg->m_dWords.Begin() + pSeg->m_dWordCheckpoints[iCur].m_iOffset;
			if ( iNext<pSeg->m_dWordCheckpoints.GetLength() )
				tReader.m_pMax = pSeg->m_dWords.Begin() + pSeg->m_dWordCheckpoints[iNext].m_iOffset;

			while ( tReader.UnzipWord() )
			{
				// stemmed terms should not match infixes when exact forms are indexed
				if ( tArgs.m_bHasExactForms && tReader->m_sWord[1]!=MAGIC_WORD_HEAD_NONSTEMMED )
					continue;

				// check it
				if ( !sphWildcardMatch ( (const char*)tReader->m_sWord+1+iSkipMagic, sWildcard, pWildcard ) )
					continue;

				// matched, lets add
				tDict2Payload.Add ( (const RtWord_t*)tReader, iSeg );
			}
		}
	}

	tDict2Payload.Convert ( tArgs );
}
#if WITH_RE2
// per-term state for regex expansion: the compiled RE2 pattern and the
// payload collector that accumulates its dictionary matches
struct RtRegexMatch_t
{
	std::unique_ptr<RE2> m_pRe { nullptr };
	std::unique_ptr<DictEntryRtPayload_t> m_pPayload { nullptr };
};
#endif
// scan the full RAM-segment dictionaries once, matching every word against
// all regex terms at once, and hand each term's matches to its converter.
// No-op unless built with RE2 support.
void RtIndex_c::ScanRegexWords ( const VecTraits_T<RegexTerm_t> & dTerms, const ISphWordlist::Args_t & tArgs, const VecExpandConv_t & dConverters ) const
{
	assert ( dTerms.GetLength() && dTerms.GetLength()==dConverters.GetLength() );

#if WITH_RE2
	const auto & dSegments = *(const RtSegVec_c*)tArgs.m_pIndexData.Ptr();
	CSphFixedVector<RtRegexMatch_t> dRegex ( dTerms.GetLength() );

	// compile all patterns up front (UTF-8 mode)
	RE2::Options tOptions;
	tOptions.set_encoding ( RE2::Options::Encoding::EncodingUTF8 );
	ARRAY_FOREACH ( i, dRegex )
	{
		dRegex[i].m_pRe = std::make_unique<RE2> ( dTerms[i].first.cstr(), tOptions );
		dRegex[i].m_pPayload = std::make_unique<DictEntryRtPayload_t> ( tArgs.m_bPayload, dSegments.GetLength() );
		assert ( dRegex[i].m_pRe && dRegex[i].m_pPayload );
	}

	ARRAY_FOREACH ( iSeg, dSegments )
	{
		const RtSegment_t * pCurSeg = dSegments[iSeg];
		RtWordReader_c tReader ( pCurSeg, true, m_iWordsCheckpoint, m_tSettings.m_eHitless );

		// single pass over the whole segment dictionary
		while ( tReader.UnzipWord() )
		{
			const BYTE * pDictWord = tReader->m_sWord+1;

			// stemmed terms should not match suffixes
			if ( tArgs.m_bHasExactForms && *pDictWord!=MAGIC_WORD_HEAD_NONSTEMMED )
				continue;

			int iLen = tReader->m_sWord[0];
			if ( *pDictWord<0x20 ) // anyway skip heading magic chars in the prefix, like NONSTEMMED maker
			{
				pDictWord++;
				iLen--;
			}

			// each matching pattern collects the word independently
			re2::StringPiece sDictToken ( (const char *)pDictWord, iLen );
			ARRAY_FOREACH ( i, dRegex )
			{
				if ( RE2::FullMatchN ( sDictToken, *dRegex[i].m_pRe, nullptr, 0 ) )
					dRegex[i].m_pPayload->Add ( (const RtWord_t *)tReader, iSeg );
			}
		}
	}

	// hand the per-term collectors back to the caller
	ARRAY_FOREACH ( i, dRegex )
		dConverters[i] = std::move( dRegex[i].m_pPayload );
#endif
}
// gather spelling suggestions from RAM segments first, then from disk chunks
// (newest first), stopping early once extra chunks stop improving the results
void RtIndex_c::GetSuggest ( const SuggestArgs_t & tArgs, SuggestResult_t & tRes ) const
{
	auto tGuard = RtGuard();
	const auto& dSegments = tGuard.m_dRamSegs;

	// segments and disk chunks dictionaries produce duplicated entries
	tRes.m_bMergeWords = true;

	if ( dSegments.GetLength() )
	{
		assert ( !tRes.m_pWordReader && !tRes.m_pSegments );
		// lend a word reader and the segment set to the shared suggest engine
		auto pReader = std::make_unique<RtWordReader_c> ( dSegments[0], true, m_iWordsCheckpoint, m_tSettings.m_eHitless );
		tRes.m_pWordReader = pReader.get();
		tRes.m_pSegments = tGuard.m_tSegmentsAndChunks.m_pSegs;
		tRes.m_bHasExactDict = m_tSettings.m_bIndexExactWords;

		// FIXME!!! cache InfixCodepointBytes as it is slow - GetMaxCodepointLength is charset_table traverse
		sphGetSuggest ( this, m_pTokenizer->GetMaxCodepointLength(), tArgs, tRes );

		// detach the borrowed pointers before pReader goes out of scope
		tRes.m_pWordReader = nullptr;
		tRes.m_pSegments = nullptr;
	}

	int iWorstCount = 0;
	auto& dDiskChunks = tGuard.m_dDiskChunks;
	// check disk chunks from recent to oldest
	for ( int i = dDiskChunks.GetLength() - 1; i >= 0; --i )
	{
		// snapshot the current worst suggestion to detect stagnation
		int iWorstDist = 0;
		int iWorstDocs = 0;
		if ( tRes.m_dMatched.GetLength() )
		{
			iWorstDist = tRes.m_dMatched.Last().m_iDistance;
			iWorstDocs = tRes.m_dMatched.Last().m_iDocs;
		}

		dDiskChunks[i]->Cidx().GetSuggest ( tArgs, tRes );

		// stop checking in case worst element is same several times during loop
		if ( tRes.m_dMatched.GetLength() && iWorstDist==tRes.m_dMatched.Last().m_iDistance && iWorstDocs==tRes.m_dMatched.Last().m_iDocs )
		{
			iWorstCount++;
			if ( iWorstCount>2 )
				break;
		} else
		{
			iWorstCount = 0;
		}
	}
}
// suggest-engine callback: collect candidate checkpoints for the suffix over
// all RAM segments, packing the segment id into the top 8 bits of each entry
// (hence the <0xFF segment-count assertion; low 24 bits = checkpoint index)
void RtIndex_c::SuffixGetChekpoints ( const SuggestResult_t & tRes, const char * sSuffix, int iLen, CSphVector<DWORD> & dCheckpoints ) const
{
	const auto& dSegments = *(const RtSegVec_c*)tRes.m_pSegments.Ptr();
	assert ( dSegments.GetLength()<0xFF );

	ARRAY_FOREACH ( iSeg, dSegments )
	{
		const RtSegment_t * pSeg = dSegments[iSeg];
		if ( !pSeg->m_dWords.GetLength () )
			continue;

		int iStart = dCheckpoints.GetLength();
		if ( !ExtractInfixCheckpoints ( sSuffix, iLen, m_iMaxCodepointLength, pSeg->m_dWordCheckpoints.GetLength(), pSeg->m_dInfixFilterCP, dCheckpoints ) )
			continue;

		// tag the freshly added checkpoints with their segment id
		DWORD iSegPacked = (DWORD)iSeg<<24;
		for ( int i=iStart; i<dCheckpoints.GetLength(); i++ )
		{
			assert ( ( dCheckpoints[i] & 0xFFFFFF )==dCheckpoints[i] );
			dCheckpoints[i] |= iSegPacked;
		}
	}
}
// suggest-engine callback: position the word reader on the checkpoint region
// encoded in iCP (top 8 bits = RAM segment id, low 24 bits = checkpoint index
// as produced by SuffixGetChekpoints; index 0 = region before 1st checkpoint)
void RtIndex_c::SetCheckpoint ( SuggestResult_t & tRes, DWORD iCP ) const
{
	assert ( tRes.m_pWordReader && tRes.m_pSegments );
	const auto& dSegments = *(const RtSegVec_c*)tRes.m_pSegments.Ptr();
	auto* pReader = (RtWordReader_c*)tRes.m_pWordReader;

	int iSeg = iCP>>24;
	assert ( iSeg>=0 && iSeg<dSegments.GetLength() );
	const RtSegment_t * pSeg = dSegments[iSeg];
	pReader->Reset ( pSeg );

	int iNext = (int)( iCP & 0xFFFFFF );
	int iCur = iNext-1;

	// FIXED: was iCur>0, which skipped the seek for checkpoint 0 and made the
	// reader start at the segment beginning, scanning the pre-checkpoint words
	// too; now matches the identical region logic in GetInfixedWords
	if ( iCur>=0 )
		pReader->m_pCur = pSeg->m_dWords.Begin() + pSeg->m_dWordCheckpoints[iCur].m_iOffset;
	if ( iNext<pSeg->m_dWordCheckpoints.GetLength() )
		pReader->m_pMax = pSeg->m_dWords.Begin() + pSeg->m_dWordCheckpoints[iNext].m_iOffset;
}
// suggest-engine callback: decode the next dictionary word from the reader
// previously positioned by SetCheckpoint; false once the region is exhausted
bool RtIndex_c::ReadNextWord ( SuggestResult_t & tRes, DictWord_t & tWord ) const
{
	assert ( tRes.m_pWordReader );
	auto& tReader = *(RtWordReader_c*)tRes.m_pWordReader;

	if ( !tReader.UnzipWord() )
		return false;

	// words are stored length-prefixed: byte 0 is the length, text follows
	const auto* pEntry = (const RtWord_t*)tReader;
	tWord.m_iLen = pEntry->m_sWord[0];
	tWord.m_sWord = (const char *)( pEntry->m_sWord + 1 );
	tWord.m_iDocs = pEntry->m_uDocs;
	return true;
}
// two-mode qword setup: iSeg>=0 binds the qword to one segment (with readers);
// iSeg<0 is the stats-only pass summing doc/hit counts over all segments
bool RtIndex_c::RtQwordSetup ( RtQword_t * pQword, int iSeg, const RtGuard_t& tGuard ) const
{
	// segment-specific setup pass
	if ( iSeg>=0 )
		return RtQwordSetupSegment ( pQword, tGuard.m_dRamSegs[iSeg], true );

	// stat-only pass
	// loop all segments, gather stats, do not setup anything
	pQword->m_iDocs = 0;
	pQword->m_iHits = 0;
	if ( tGuard.m_dRamSegs.IsEmpty() )
		return true;

	// we care about the results anyway though
	// because if all (!) segments miss this word, we must notify the caller, right?
	bool bFound = false;
	for ( const auto& pSeg : tGuard.m_dRamSegs )
		bFound |= RtQwordSetupSegment ( pQword, pSeg, false );

	// sanity check
	assert (!( bFound==true && pQword->m_iDocs==0 ) );
	return bFound;
}
// register '=' as a special char so the tokenizer emits exact-form markers
void SetupExactTokenizer ( const TokenizerRefPtr_c & pTokenizer )
{
	assert ( pTokenizer );
	pTokenizer->AddSpecials ( "=" );
}
// let '*' pass through as a plain char so wildcard queries keep their stars
void SetupStarTokenizer ( const TokenizerRefPtr_c & pTokenizer )
{
	assert ( pTokenizer );
	pTokenizer->AddPlainChars ( "*" );
}
// final-stage expression calculator for RT matches: runs the query's "final"
// expressions per segment, using pass 0 to also record which segments have
// matches so later per-segment passes can be skipped when empty
class SphRtFinalMatchCalc_c : public MatchProcessor_i, ISphNoncopyable
{
public:
	SphRtFinalMatchCalc_c ( int iSegments, const CSphQueryContext & tCtx )
		: m_tCtx ( tCtx )
		, m_iSegments ( iSegments )
	{
		m_dSegments.Init ( iSegments );
	}

	// returns true when iSeg should be processed (segment 0 is always
	// processed because it doubles as the usage-marking pass)
	bool NextSegment ( int iSeg )
	{
		m_iSeg = iSeg;

		bool bSegmentGotRows = m_dSegments.BitGet ( iSeg );

		// clear current row
		m_dSegments.BitClear ( iSeg );

		// also clear 0 segment as it got forced to process
		m_dSegments.BitClear ( 0 );

		// also force to process 0 segment to mark all used segments
		return ( iSeg==0 || bSegmentGotRows );
	}

	bool HasSegments () const								{ return ( m_iSeg==0 || m_dSegments.BitCount()>0 ); }
	void Process ( CSphMatch * pMatch ) final				{ ProcessMatch ( pMatch ); }
	void Process ( VecTraits_T<CSphMatch *> & dMatches ) final { dMatches.for_each ( [this]( CSphMatch * pMatch ){ ProcessMatch(pMatch); } ); }
	// columnar final expressions require matches in row-id order
	bool ProcessInRowIdOrder() const final					{ return m_tCtx.m_dCalcFinal.any_of ( []( const ContextCalcItem_t & i ){ return i.m_pExpr && i.m_pExpr->IsColumnar(); } ); }

private:
	const CSphQueryContext &	m_tCtx;			// holds the final-stage expression list
	int							m_iSeg = 0;		// segment currently being processed
	int							m_iSegments;	// total segment count

	// count per segments matches
	// to skip iteration of matches at sorter and pool setup for segment without matches at sorter
	CSphBitvec					m_dSegments;

	inline void ProcessMatch ( CSphMatch * pMatch )
	{
		// match tags are 1-based segment ids
		int iMatchSegment = pMatch->m_iTag-1;
		if ( iMatchSegment==m_iSeg )
			m_tCtx.CalcFinal ( *pMatch );

		// count all used segments at 0 pass
		if ( m_iSeg==0 && iMatchSegment<m_iSegments )
			m_dSegments.BitSet ( iMatchSegment );
	}
};
// converts sorter matches holding pooled (blob/columnar) attribute refs into
// standalone pointers, resolving each match's source (RAM segment or disk
// chunk) by its tag; disk-chunk pools are registered up front via Set()
class SorterSchemaTransform_c
{
public:
			SorterSchemaTransform_c ( int iNumChunks, bool bFinalizeSorters );

	void	Set ( int iChunk, const CSphQueryResult & tChunkResult ) { m_dDiskChunkData[iChunk].Set(tChunkResult); }
	void	Transform ( ISphMatchSorter * pSorter, const RtGuard_t& tGuard );

private:
	// per-disk-chunk attribute pools captured from the chunk's query result
	struct DiskChunkData_t
	{
		const BYTE *			m_pBlobPool;
		columnar::Columnar_i *	m_pColumnar;

		void Set ( const CSphQueryResult & tChunkResult )
		{
			m_pBlobPool = tChunkResult.m_pBlobPool;
			m_pColumnar = tChunkResult.m_pColumnar;
		}
	};

	CSphVector<DiskChunkData_t>	m_dDiskChunkData;
	bool						m_bFinalizeSorters = false;	// also finalize sorters during transform
};
// preallocate one pool-data slot per disk chunk; slots are filled via Set()
SorterSchemaTransform_c::SorterSchemaTransform_c ( int iNumChunks, bool bFinalizeSorters )
	: m_bFinalizeSorters ( bFinalizeSorters )
{
	m_dDiskChunkData.Resize(iNumChunks);
}
// walk the sorter's matches and rewrite pooled attribute references into
// standalone pointers. Match tags are 1-based: tags below the RAM segment
// count address RAM segments, the rest address disk chunks. RAM segments are
// read-locked lazily on first touch and unlocked on scope exit.
void SorterSchemaTransform_c::Transform ( ISphMatchSorter * pSorter, const RtGuard_t& tGuard )
{
	assert(pSorter);

	CSphBitvec tLockedSegs ( tGuard.m_dRamSegs.GetLength() );
	// unlock every segment we read-locked inside fnGetBlobPoolFromMatch
	auto tUnlockChunks = AtScopeExit ( [&tGuard, &tLockedSegs]() NO_THREAD_SAFETY_ANALYSIS {
		ARRAY_FOREACH ( i, tGuard.m_dRamSegs )
			if ( tLockedSegs.BitGet ( i ) )
				tGuard.m_dRamSegs[i]->m_tLock.Unlock();
	});

	auto fnGetBlobPoolFromMatch = [&tGuard,&tLockedSegs,this] ( const CSphMatch * pMatch ) NO_THREAD_SAFETY_ANALYSIS
	{
		int nRamChunks = tGuard.m_dRamSegs.GetLength ();
		int iChunkId = pMatch->m_iTag-1;
		if ( iChunkId>=nRamChunks )
			return m_dDiskChunkData[iChunkId - nRamChunks].m_pBlobPool;

		// lazily read-lock each RAM segment the first time a match needs it
		if ( !tLockedSegs.BitGet ( iChunkId ) )
		{
			tLockedSegs.BitSet ( iChunkId );
			tGuard.m_dRamSegs[iChunkId]->m_tLock.ReadLock();
		}
		return (const BYTE *) tGuard.m_dRamSegs[iChunkId]->m_dBlobs.Begin();
	};

	auto fnGetColumnarFromMatch = [&tGuard,this] ( const CSphMatch * pMatch ) -> columnar::Columnar_i *
	{
		int nRamChunks = tGuard.m_dRamSegs.GetLength ();
		int iChunkId = pMatch->m_iTag-1;
		if ( iChunkId<nRamChunks )
			return tGuard.m_dRamSegs[iChunkId]->m_pColumnar.get();

		return m_dDiskChunkData[iChunkId-nRamChunks].m_pColumnar;
	};

	pSorter->TransformPooled2StandalonePtrs ( fnGetBlobPoolFromMatch, fnGetColumnarFromMatch, m_bFinalizeSorters );
}
// estimate an upper bound for count(distinct groupby-attr) across all disk
// chunks; returns -1 when unknown (no groupby, or a chunk lacks the stat).
// NOTE(review): chunks where the groupby attr is not found are silently
// skipped rather than forcing -1 — presumably intentional, verify upstream.
static int64_t CalcMaxCountDistinct ( const CSphQuery & tQuery, const RtGuard_t & tGuard )
{
	if ( tQuery.m_sGroupBy.IsEmpty() )
		return -1;

	int64_t iMaxCountDistinct = -1;
	for ( auto & i : tGuard.m_dDiskChunks )
	{
		int iGroupby = GetAliasedAttrIndex ( tQuery.m_sGroupBy, tQuery, i->Cidx().GetMatchSchema() );
		if ( iGroupby>=0 )
		{
			CSphString sModifiedAttr;
			int64_t iCountDistinct = i->Cidx().GetCountDistinct ( i->Cidx().GetMatchSchema().GetAttr(iGroupby).m_sName, sModifiedAttr );
			if ( iCountDistinct==-1 )
				return -1;	// if one of the chunks doesn't have that info, we can't calculate max

			iMaxCountDistinct = Max ( iCountDistinct, iMaxCountDistinct );
		}
	}

	return iMaxCountDistinct;
}
// Decide how many threads each disk chunk gets (pseudo-sharding).
// Returns false when any chunk demands single-threaded execution,
// which disables pseudo-sharding for the whole query.
static bool CalcDiskChunkSplits ( IntVec_t & dThreads, int iJobs, const CSphQuery & tQuery, const CSphMultiQueryArgs & tArgs, const RtGuard_t & tGuard )
{
	CSphVector<SplitData_t> dSplitData ( iJobs );
	int iMaxThreadsPerIndex = CalcMaxThreadsPerIndex ( tArgs.m_iThreads, iJobs );
	int64_t iMaxCountDistinct = CalcMaxCountDistinct ( tQuery, tGuard );

	for ( int iChunk = 0; iChunk < iJobs; iChunk++ )
	{
		bool bSingleForced = false;
		auto tMetric = tGuard.m_dDiskChunks[iChunk]->Cidx().GetPseudoShardingMetric ( { &tQuery, 1 }, { &iMaxCountDistinct, 1 }, iMaxThreadsPerIndex, bSingleForced );
		if ( bSingleForced )
			return false;

		assert ( tMetric.first>=0 );
		auto & tChunkSplit = dSplitData[iChunk];
		tChunkSplit.m_iMetric = tMetric.first;
		// an explicitly requested concurrency overrides per-chunk thread caps
		tChunkSplit.m_iThreadCap = tQuery.m_iConcurrency ? 0 : tMetric.second;
	}

	// only worth splitting when there are spare threads beyond one-per-chunk
	if ( tArgs.m_iThreads>iJobs )
		DistributeThreadsOverIndexes ( dThreads, dSplitData, tArgs.m_iThreads );

	return true;
}
static bool QueryDiskChunks ( const CSphQuery & tQuery, CSphQueryResultMeta & tResult, const CSphMultiQueryArgs & tArgs, const RtGuard_t & tGuard, VecTraits_T<ISphMatchSorter *> & dSorters, QueryProfile_c * pProfiler, bool bGotLocalDF, const SmallStringHash_T<int64_t> * pLocalDocs, int64_t iTotalDocs, const char * szIndexName, SorterSchemaTransform_c & tSSTransform, int64_t tmMaxTimer )
{
// counter of tasks we will issue now
int iJobs = tGuard.m_dDiskChunks.GetLength();
if ( !iJobs )
return true;
assert ( !dSorters.IsEmpty () );
auto tDispatch = GetEffectiveBaseDispatcherTemplate();
Dispatcher::Unify ( tDispatch, tQuery.m_tMainDispatcher );
// the context
ClonableCtx_T<DiskChunkSearcherCtx_t, DiskChunkSearcherCloneCtx_t, Threads::ECONTEXT::ORDERED> tClonableCtx { dSorters, tResult };
// because disk chunk search within the loop will switch the profiler state
SwitchProfile ( pProfiler, SPH_QSTATE_INIT );
// uninitilized dSplits (due to tClonableCtx.IsSingle()) could still cause the SplitQuery code path at the CSphIndex_VLN::MultiQuery as
// dSplits[iChunk] -> tMultiArgs.m_iThreads
// then at the m_dDiskChunks
// if ( tArgs.m_iThreads>1 ) return SplitQuery
IntVec_t dSplits {iJobs};
dSplits.Fill(1); // moved here from CalcDiskChunkSplits to prevent short-cuts
auto pDispatcher = Dispatcher::Make ( iJobs, tArgs.m_iThreads, tDispatch, tClonableCtx.IsSingle() || !CalcDiskChunkSplits ( dSplits, iJobs, tQuery, tArgs, tGuard ));
const int iThreads = pDispatcher->GetConcurrency();
tClonableCtx.LimitConcurrency ( iThreads );
auto iStart = sphMicroTimer();
RTQUERYINFO << "Started: " << ( sphMicroTimer()-iStart );
std::atomic<bool> bInterrupt { false };
std::atomic<int> bSucceed { 1 };
auto CheckInterrupt = [&bInterrupt]() { return bInterrupt.load ( std::memory_order_relaxed ); };
Coro::ExecuteN ( tClonableCtx.Concurrency ( iJobs ), [&]
{
auto pSource = pDispatcher->MakeSource();
int iJob = -1; // make it consumed
if ( !pSource->FetchTask ( iJob ) || CheckInterrupt() )
{
RTQUERYINFO << "Early finish parallel QueryDiskChunks because of empty queue";
return; // already nothing to do, early finish.
}
auto tJobContext = tClonableCtx.CloneNewContext ( !iJob );
auto& tCtx = tJobContext.first;
auto Interrupt = [&bInterrupt, &tCtx] ( const char* szReason ) {
tCtx.m_tMeta.m_sWarning = szReason;
bInterrupt.store ( true, std::memory_order_relaxed );
};
RTQUERYINFO << "QueryDiskChunks cloned context " << tJobContext.second << " (job " << iJob << ")";
tClonableCtx.SetJobOrder ( tJobContext.second, -iJob ); // fixme! Same as in single search, but here we walk in reverse order. Need to fix?
Threads::Coro::SetThrottlingPeriodMS ( session::GetThrottlingPeriodMS() );
while ( !CheckInterrupt() ) // some earlier job met error; abort.
{
// jobs come in ascending order from 0 up to iJobs-1.
// We walk over disk chunk in reverse order, from last to 0-th.
auto iChunk = iJobs - iJob - 1;
RTQUERYINFO << "QueryDiskChunks " << tJobContext.second << ", Jb/Chunk: " << iJob << "/" << iChunk;
iJob = -1; // mark it consumed
myinfo::SetTaskInfo ( "%d ch %d:", Threads::Coro::NumOfRestarts(), iChunk );
auto & dLocalSorters = tCtx.m_dSorters;
CSphQueryResultMeta tChunkMeta;
CSphQueryResult tChunkResult;
tChunkResult.m_pMeta = &tChunkMeta;
CSphQueryResultMeta & tThMeta = tCtx.m_tMeta;
tChunkMeta.m_pProfile = tThMeta.m_pProfile;
CSphMultiQueryArgs tMultiArgs ( tArgs.m_iIndexWeight );
// storing index in matches tag for finding strings attrs offset later, biased against default zero and segments
tMultiArgs.m_iTag = tGuard.m_dRamSegs.GetLength ()+iChunk+1;
tMultiArgs.m_uPackedFactorFlags = tArgs.m_uPackedFactorFlags;
tMultiArgs.m_bLocalDF = bGotLocalDF;
tMultiArgs.m_pLocalDocs = pLocalDocs;
tMultiArgs.m_iTotalDocs = iTotalDocs;
tMultiArgs.m_iThreads = dSplits[iChunk];
tMultiArgs.m_iTotalThreads = iThreads;
// we use sorters in both disk chunks and ram chunks,
// that's why we don't want to move to a new schema before we searched ram chunks
tMultiArgs.m_bModifySorterSchemas = false;
bool bChunkSucceed = tGuard.m_dDiskChunks[iChunk]->Cidx().MultiQuery ( tChunkResult, tQuery, dLocalSorters, tMultiArgs ) ;
bSucceed.fetch_and ( bChunkSucceed );
if ( !bChunkSucceed )
Interrupt ( "" );
// check terms inconsistency among disk chunks
tThMeta.MergeWordStats ( tChunkMeta );
tThMeta.m_bHasPrediction |= tChunkMeta.m_bHasPrediction;
tSSTransform.Set ( iChunk, tChunkResult );
if ( tThMeta.m_bHasPrediction )
tThMeta.m_tStats.Add ( tChunkMeta.m_tStats );
if ( iChunk && sph::TimeExceeded ( tmMaxTimer ) )
Interrupt ( "query time exceeded max_query_time" );
if ( tThMeta.m_sWarning.IsEmpty() && !tChunkMeta.m_sWarning.IsEmpty() )
tThMeta.m_sWarning = tChunkMeta.m_sWarning;
tThMeta.m_bTotalMatchesApprox |= tChunkMeta.m_bTotalMatchesApprox;
tThMeta.m_tIteratorStats.Merge ( tChunkMeta.m_tIteratorStats );
if ( CheckInterrupt() && !tChunkMeta.m_sError.IsEmpty() )
// FIXME? maybe handle this more gracefully (convert to a warning)?
tThMeta.m_sError = tChunkMeta.m_sError;
if ( !pSource->FetchTask ( iJob ) || CheckInterrupt() )
return; // all is done
// yield and reschedule every quant of time. It gives work to other tasks
if ( Threads::Coro::RuntimeExceeded() )
{
if ( session::GetKilled() )
Interrupt ( "query was killed" );
else
Threads::Coro::RescheduleAndKeepCrashQuery();
}
}
});
RTQUERYINFO "QueryDiskChunks processed in " << tClonableCtx.NumWorked() << " thread(s)";
tClonableCtx.Finalize();
return bSucceed;
}
// Finalize joins on every sorter, then run final-stage expressions over the
// matches collected from the RAM segments. Returns false on join failure.
bool FinalExpressionCalculation ( CSphQueryContext & tCtx, const VecTraits_T<RtSegmentRefPtf_t> & dRamChunks, VecTraits_T<ISphMatchSorter *> & dSorters, bool bFinalizeSorters, CSphQueryResultMeta & tMeta )
{
	// wrap up JOINs first; a failure there aborts the whole query
	for ( ISphMatchSorter * pSorter : dSorters )
		if ( !pSorter->FinalizeJoin ( tMeta.m_sError, tMeta.m_sWarning ) )
			return false;

	// nothing more to do w/o final-stage expressions
	if ( tCtx.m_dCalcFinal.IsEmpty() )
		return true;

	// at 0 pass the processor also fills a bitmask of segments which actually
	// have matches in the sorters; segments w/o matches are skipped afterwards
	SphRtFinalMatchCalc_c tProcessor ( dRamChunks.GetLength(), tCtx );
	for ( int iSeg = 0; iSeg<dRamChunks.GetLength() && tProcessor.HasSegments(); ++iSeg )
	{
		if ( !tProcessor.NextSegment ( iSeg ) )
			continue;

		// set blob pool for string on_sort expression fix up
		const auto & pSeg = dRamChunks[iSeg];
		SccRL_t rLock ( pSeg->m_tLock );
		tCtx.SetBlobPool ( pSeg->m_dBlobs.Begin() );
		tCtx.SetColumnar ( pSeg->m_pColumnar.get() );
		tCtx.SetDocstore ( pSeg->m_pDocstore.get(), -1 ); // no need to create session/readers for RT segments

		for ( ISphMatchSorter * pSorter : dSorters )
			pSorter->Finalize ( tProcessor, false, bFinalizeSorters );
	}

	return true;
}
// perform initial query transformations and expansion.
// The steps below are order-dependent: transforms, then keyword expansion,
// then AOT filtering, then prefix/infix expansion.
// Returns the value fed to the coroutine stack check (via ConsiderStack),
// or 0 on error with the error stored in tMeta.m_sError.
static int PrepareFTSearch ( const RtIndex_c * pThis, bool bIsStarDict, bool bKeywordDict, int iExpandKeywords, int iExpansionLimit, const CSphIndexSettings & tSettings, const CSphQuery & tQuery, cRefCountedRefPtrGeneric_t pIndexData, const DictRefPtr_c& pDict, CSphQueryResultMeta & tMeta, QueryProfile_c * pProfiler, CSphScopedPayload * pPayloads, XQQuery_t & tParsed )
{
	// surface any parse-time warning to the caller's meta
	if ( !tParsed.m_sParseWarning.IsEmpty () )
		tMeta.m_sWarning = tParsed.m_sParseWarning;

	// transform query if needed (quorum transform, etc.)
	SwitchProfile ( pProfiler, SPH_QSTATE_TRANSFORMS );

	// FIXME!!! provide segments list instead index
	sphTransformExtendedQuery ( &tParsed.m_pRoot, tSettings, tQuery.m_bSimplify, pThis );

	int iExpandKw = ExpandKeywords ( iExpandKeywords, tQuery.m_eExpandKeywords, tSettings, bKeywordDict );
	if ( iExpandKw!=KWE_DISABLED )
	{
		sphQueryExpandKeywords ( &tParsed.m_pRoot, tSettings, iExpandKw, bKeywordDict );
		tParsed.m_pRoot->Check ( true );
	}

	// this should be after keyword expansion
	TransformAotFilter ( tParsed.m_pRoot, pDict->GetWordforms (), tSettings );

	// expanding prefix in word dictionary case
	if ( bKeywordDict && bIsStarDict )
	{
		ExpansionContext_t tExpCtx;
		tExpCtx.m_pWordlist = pThis;
		tExpCtx.m_pBuf = nullptr;
		tExpCtx.m_pResult = &tMeta;
		tExpCtx.m_iMinPrefixLen = tSettings.GetMinPrefixLen ( bKeywordDict );
		tExpCtx.m_iMinInfixLen = tSettings.m_iMinInfixLen;
		tExpCtx.m_iExpansionLimit = GetExpansionLimit ( tQuery.m_iExpansionLimit, iExpansionLimit );
		tExpCtx.m_bHasExactForms = ( pDict->HasMorphology () || tSettings.m_bIndexExactWords );
		// payloads may be disabled for debugging purposes via query flags
		tExpCtx.m_bMergeSingles = ( tQuery.m_uDebugFlags & QUERY_DEBUG_NO_PAYLOAD )==0;
		tExpCtx.m_pPayloads = pPayloads;
		tExpCtx.m_pIndexData = std::move ( pIndexData );

		tParsed.m_pRoot = sphExpandXQNode ( tParsed.m_pRoot, tExpCtx ); // here magics happens
		if ( !ExpandRegex ( tExpCtx, tMeta.m_sError ) )
			return 0;

		tExpCtx.AggregateStats();
	}

	return ConsiderStack ( tParsed.m_pRoot, tMeta.m_sError );
}
static bool SetupFilters ( const CSphQuery & tQuery, const ISphSchema & tMatchSchema, const ISphSchema & tIndexSchema, bool bFullscan, CSphQueryContext & tCtx, CSphVector<CSphFilterSettings> & dTransformedFilters, CSphVector<FilterTreeItem_t> & dTransformedFilterTree, const CSphVector<const ISphSchema *> & dSorterSchemas, CSphQueryResultMeta & tMeta )
{
CreateFilterContext_t tFlx;
tFlx.m_pFilters = &tQuery.m_dFilters;
tFlx.m_pFilterTree = &tQuery.m_dFilterTree;
tFlx.m_pMatchSchema = &tMatchSchema;
tFlx.m_pIndexSchema = &tIndexSchema;
tFlx.m_eCollation = tQuery.m_eCollation;
tFlx.m_bScan = bFullscan;
tFlx.m_sJoinIdx = tQuery.m_sJoinIdx;
std::unique_ptr<ISphSchema> pModifiedMatchSchema;
if ( !TransformFilters ( tFlx, dTransformedFilters, dTransformedFilterTree, pModifiedMatchSchema, tQuery.m_dItems, tMeta.m_sError ) )
return false;
if ( pModifiedMatchSchema )
tFlx.m_pMatchSchema = pModifiedMatchSchema.get();
tFlx.m_pFilters = &dTransformedFilters;
tFlx.m_pFilterTree = dTransformedFilterTree.GetLength() ? &dTransformedFilterTree : nullptr;
if ( !tCtx.SetupCalc ( tMeta, *tFlx.m_pMatchSchema, tIndexSchema, nullptr, nullptr, dSorterSchemas ) )
return false;
return tCtx.CreateFilters ( tFlx, tMeta.m_sError, tMeta.m_sWarning );
}
// Scan all live rows of the RAM segments, pushing passing matches into the sorters.
// Returns true when the result set may be incomplete/approximate (zero cutoff,
// cutoff reached, time limit exceeded, or query killed); false after a full scan.
static bool PerformFullscan ( const VecTraits_T<RtSegmentRefPtf_t> & dRamChunks, int iMaxDynamicSize, int iIndexWeight, int iStride, int iCutoff, int64_t tmMaxTimer, QueryProfile_c * pProfiler, CSphQueryContext & tCtx, VecTraits_T<ISphMatchSorter*> & dSorters, CSphString & sWarning )
{
	if ( !iCutoff )
		return true;

	bool bRandomize = dSorters[0]->IsRandom();
	SwitchProfile ( pProfiler, SPH_QSTATE_FULLSCAN );

	// full scan
	// FIXME? OPTIMIZE? add shortcuts here too?
	CSphMatch tMatch;
	tMatch.Reset ( iMaxDynamicSize );
	tMatch.m_iWeight = iIndexWeight;

	ARRAY_FOREACH ( iSeg, dRamChunks )
	{
		RtSegment_t & tSeg = *dRamChunks[iSeg];
		// hold the segment read-lock for the whole per-segment scan
		SccRL_t rLock ( tSeg.m_tLock );

		// (re)bind this segment's blob pool to the context and all sorters
		auto pBlobs = tSeg.m_dBlobs.Begin();
		tCtx.SetBlobPool(pBlobs);
		for ( auto * pSorter : dSorters )
			pSorter->SetBlobPool(pBlobs);

		// same for the columnar storage (also used by the filter, if any)
		auto pColumnar = tSeg.m_pColumnar.get();
		tCtx.SetColumnar(pColumnar);
		for ( auto * pSorter : dSorters )
			pSorter->SetColumnar(pColumnar);

		if ( tCtx.m_pFilter )
			tCtx.m_pFilter->SetColumnar(pColumnar);

		session::Info().m_pSessionOpaque2 = (void*)tSeg.m_pDocstore.get();

		for ( auto tRowID : RtLiveRows_c(tSeg) )
		{
			tMatch.m_tRowID = tRowID;
			tMatch.m_pStatic = tSeg.m_dRows.Begin() + (int64_t)tRowID*iStride;

			// evaluate filter expressions, then the filter itself
			tCtx.CalcFilter ( tMatch );
			if ( tCtx.m_pFilter && !tCtx.m_pFilter->Eval ( tMatch ) )
			{
				tCtx.FreeDataFilter ( tMatch );
				continue;
			}

			if ( bRandomize )
				tMatch.m_iWeight = ( sphRand() & 0xffff ) * iIndexWeight;

			tCtx.CalcSort ( tMatch );

			// storing segment in matches tag for finding strings attrs offset later, biased against default zero
			tMatch.m_iTag = iSeg+1;

			bool bNewMatch = false;
			for ( auto * pSorter: dSorters )
				bNewMatch |= pSorter->Push ( tMatch );

			// stringptr expressions should be duplicated (or taken over) at this point
			tCtx.FreeDataFilter ( tMatch );
			tCtx.FreeDataSort ( tMatch );

			// handle cutoff
			if ( bNewMatch )
				if ( --iCutoff==0 )
					return true;

			// handle timer
			if ( sph::TimeExceeded ( tmMaxTimer ) )
			{
				sWarning = "query time exceeded max_query_time";
				return true;
			}

			// yield periodically so other coroutines can run; honor a kill request
			if ( Threads::Coro::RuntimeExceeded() )
			{
				if ( session::GetKilled() )
				{
					sWarning = "query was killed";
					return true;
				}
				Threads::Coro::RescheduleAndKeepCrashQuery();
			}
		}
	}

	return false;
}
// Run a fullscan (no text query) over the RAM segments and then execute
// final-stage expressions over everything the sorters collected.
static bool DoFullScanQuery ( const RtSegVec_c & dRamChunks, const ISphSchema & tMaxSorterSchema, const ISphSchema & tIndexSchema, const CSphQuery & tQuery, const CSphMultiQueryArgs & tArgs, int iStride, int64_t tmMaxTimer, QueryProfile_c * pProfiler, CSphQueryContext & tCtx, VecTraits_T<ISphMatchSorter*> & dSorters, CSphQueryResultMeta & tMeta )
{
	// probably redundant, but just in case
	SwitchProfile ( pProfiler, SPH_QSTATE_INIT );

	// search segments no looking to max_query_time
	// FIXME!!! move searching at segments before disk chunks as result set is safe with kill-lists
	if ( !dRamChunks.IsEmpty() )
	{
		// FIXME! OPTIMIZE! check if we can early reject the whole index
		int iCutoff = ApplyImplicitCutoff ( tQuery, dSorters, false );
		bool bApprox = PerformFullscan ( dRamChunks, tMaxSorterSchema.GetDynamicSize(), tArgs.m_iIndexWeight, iStride, iCutoff, tmMaxTimer, pProfiler, tCtx, dSorters, tMeta.m_sWarning );
		tMeta.m_bTotalMatchesApprox |= bApprox;
	}

	return FinalExpressionCalculation ( tCtx, dRamChunks, dSorters, tArgs.m_bFinalizeSorters, tMeta );
}
// Run full-text matching over every RAM segment: reset the ranker per segment,
// pull matches batch by batch, weight/sort/filter them, and push into the sorters.
// Stops early when the implicit cutoff is reached.
static void PerformFullTextSearch ( const RtSegVec_c & dRamChunks, RtQwordSetup_t & tTermSetup, ISphRanker * pRanker, int iIndexWeight, int iCutoff, QueryProfile_c * pProfiler, CSphQueryContext & tCtx, VecTraits_T<ISphMatchSorter*> & dSorters )
{
	if ( !iCutoff )
		return;

	bool bRandomize = dSorters[0]->IsRandom();
	// query matching
	ARRAY_FOREACH ( iSeg, dRamChunks )
	{
		const RtSegment_t * pSeg = dRamChunks[iSeg];
		// read-lock the segment for the whole matching pass
		SccRL_t rLock ( pSeg->m_tLock );
		SwitchProfile ( pProfiler, SPH_QSTATE_INIT_SEGMENT );

		tTermSetup.SetSegment ( iSeg );
		pRanker->Reset ( tTermSetup );

		// for lookups to work
		tCtx.m_pIndexData = pSeg;

		// set blob pool for string on_sort expression fix up
		const BYTE * pBlobPool = pSeg->m_dBlobs.Begin ();
		tCtx.SetBlobPool ( pBlobPool );
		for ( auto * pSorter : dSorters )
			pSorter->SetBlobPool ( pBlobPool );

		auto pColumnar = pSeg->m_pColumnar.get();
		tCtx.SetColumnar(pColumnar);
		for ( auto * pSorter : dSorters )
			pSorter->SetColumnar(pColumnar);

		if ( tCtx.m_pFilter )
			tCtx.m_pFilter->SetColumnar(pColumnar);

		// storing segment in matches tag for finding strings attrs offset later, biased against default zero
		int iTag = iSeg+1;
		if ( tCtx.m_uPackedFactorFlags & SPH_FACTOR_ENABLE )
			pRanker->ExtraData ( EXTRA_SET_MATCHTAG, (void**)&iTag );

		pRanker->ExtraData ( EXTRA_SET_BLOBPOOL, (void**)&pBlobPool );
		pRanker->ExtraData ( EXTRA_SET_COLUMNAR, (void**)&pColumnar );

		CSphMatch * pMatch = pRanker->GetMatchesBuffer();
		while (true)
		{
			// ranker does profile switches internally in GetMatches()
			int iMatches = pRanker->GetMatches();
			if ( iMatches<=0 )
				break;

			SwitchProfile ( pProfiler, SPH_QSTATE_SORT );

			for ( int i=0; i<iMatches; i++ )
			{
				CSphMatch & tMatch = pMatch[i];

				tMatch.m_pStatic = pSeg->GetDocinfoByRowID ( tMatch.m_tRowID );
				tMatch.m_iWeight *= iIndexWeight;
				if ( bRandomize )
					tMatch.m_iWeight = ( sphRand() & 0xffff ) * iIndexWeight;

				tCtx.CalcSort ( tMatch );

				if ( tCtx.m_pWeightFilter && !tCtx.m_pWeightFilter->Eval ( tMatch ) )
				{
					tCtx.FreeDataSort ( tMatch );
					continue;
				}

				// storing segment in matches tag for finding strings attrs offset later, biased against default zero
				tMatch.m_iTag = iTag;

				bool bNewMatch = false;
				for ( auto* pSorter : dSorters )
				{
					bNewMatch |= pSorter->Push ( tMatch );

					// when packed factors are on, tell the ranker which rows
					// were just pushed/popped so its factor pool stays in sync
					if ( tCtx.m_uPackedFactorFlags & SPH_FACTOR_ENABLE )
					{
						RowTagged_t tJustPushed = pSorter->GetJustPushed();
						VecTraits_T<RowTagged_t> dJustPopped = pSorter->GetJustPopped();
						pRanker->ExtraData ( EXTRA_SET_MATCHPUSHED, (void**)&tJustPushed );
						pRanker->ExtraData ( EXTRA_SET_MATCHPOPPED, (void**)&dJustPopped );
					}
				}

				// stringptr expressions should be duplicated (or taken over) at this point
				tCtx.FreeDataFilter ( tMatch );
				tCtx.FreeDataSort ( tMatch );

				if ( bNewMatch )
					if ( --iCutoff==0 )
						break;
			}

			// cutoff reached: also terminate the outer per-segment loop
			if ( iCutoff==0 )
			{
				iSeg = dRamChunks.GetLength();
				break;
			}
		}
	}
}
// Run the full-text branch over RAM segments: create the ranker, execute the
// per-segment matching, then finalize expressions and the ranker's factor cache.
// The whole body runs through Coro::ContinueBool with the stack budget computed
// earlier by PrepareFTSearch/ConsiderStack.
static bool DoFullTextSearch ( const RtSegVec_c & dRamChunks, const ISphSchema & tMaxSorterSchema, const ISphSchema & tIndexSchema, const CSphQuery & tQuery, const char * szIndexName, const CSphMultiQueryArgs & tArgs, int iMatchPoolSize, int iStackNeed, RtQwordSetup_t & tTermSetup, QueryProfile_c * pProfiler, CSphQueryContext & tCtx, VecTraits_T<ISphMatchSorter*> & dSorters, XQQuery_t & tParsed, CSphQueryResultMeta & tMeta, ISphMatchSorter * pSorter )
{
	// set zonespanlist settings
	tParsed.m_bNeedSZlist = tQuery.m_bZSlist;

	return Threads::Coro::ContinueBool ( iStackNeed, [&] {

	// setup query
	// must happen before index-level reject, in order to build proper keyword stats
	std::unique_ptr<ISphRanker> pRanker = sphCreateRanker ( tParsed, tQuery, tMeta, tTermSetup, tCtx, tMaxSorterSchema );
	if ( !pRanker )
		return false;

	tCtx.SetupExtraData ( pRanker.get (), pSorter );

	pRanker->ExtraData ( EXTRA_SET_POOL_CAPACITY, (void **) &iMatchPoolSize );

	// check for the possible integer overflow in m_dPool.Resize
	int64_t iPoolSize = 0;
	if ( pRanker->ExtraData ( EXTRA_GET_POOL_SIZE, (void **) &iPoolSize ) && iPoolSize>INT_MAX )
	{
		tMeta.m_sError.SetSprintf ( "ranking factors pool too big (%d Mb), reduce max_matches",
			(int) ( iPoolSize / 1024 / 1024 ) );
		return false;
	}

	// probably redundant, but just in case
	SwitchProfile ( pProfiler, SPH_QSTATE_INIT );

	// search segments no looking to max_query_time
	// FIXME!!! move searching at segments before disk chunks as result set is safe with kill-lists
	if ( !dRamChunks.IsEmpty () )
	{
		// FIXME! OPTIMIZE! check if we can early reject the whole index
		// do searching
		int iCutoff = ApplyImplicitCutoff ( tQuery, dSorters, true );
		PerformFullTextSearch ( dRamChunks, tTermSetup, pRanker.get (), tArgs.m_iIndexWeight, iCutoff, pProfiler, tCtx, dSorters );
	}

	if ( !FinalExpressionCalculation ( tCtx, dRamChunks, dSorters, tArgs.m_bFinalizeSorters, tMeta ) )
		return false;

	//////////////////////
	// copying match's attributes to external storage in result set
	//////////////////////

	SwitchProfile ( pProfiler, SPH_QSTATE_FINALIZE );
	pRanker->FinalizeCache ( tMaxSorterSchema );
	return true;

	});
}
// Extract subset of chunks/segments for ops with list of chunk, i.e. for 'select .. from rt.0.2.4'
// if dChunks is empty (i.e. no subset required) - returns whole tOrigin, i.e. all chunks/segments
// if tOrigin has 3 disk chunks + 2 ram chunks, as {0d,1d,2d;0r,1r} and dChunks is {0,2,4} -
// result will have 2 disk chunks + 1 ram chunk, as {0d,2d;1r}
ConstRtData FilterReaderChunks ( ConstRtData tOrigin, const VecTraits_T<int64_t>& dChunks )
{
	if ( dChunks.IsEmpty() )
		return tOrigin;

	// collect requested ids, sorted and deduplicated
	CSphVector<int> dSorted;
	for ( int64_t iRequested : dChunks )
		dSorted.Add ( (int)iRequested );
	dSorted.Uniq(); // implies also Sort()

	// ids [0..iDiskBound) address disk chunks; [iDiskBound..iAllBound) address ram segments
	const int iDiskBound = tOrigin.m_pChunks->GetLength();
	const int iAllBound = iDiskBound + tOrigin.m_pSegs->GetLength();

	int iDiskSelected = 0;
	int iRamSelected = 0;
	for ( int iId : dSorted )
	{
		if ( iId<iDiskBound )
			++iDiskSelected;
		else if ( iId<iAllBound )
			++iRamSelected;
		else
			break; // ids are sorted, so everything past this point is out of range
	}

	auto pDiskSubset = new DiskChunkVec_c;
	pDiskSubset->Resize ( iDiskSelected );
	for ( int i = 0; i<iDiskSelected; ++i )
		( *pDiskSubset )[i] = ( *tOrigin.m_pChunks )[dSorted[i]];

	auto pRamSubset = new RtSegVec_c;
	pRamSubset->Resize ( iRamSelected );
	for ( int i = 0; i<iRamSelected; ++i )
		( *pRamSubset )[i] = ( *tOrigin.m_pSegs )[dSorted[iDiskSelected+i]-iDiskBound];

	ConstDiskChunkVecRefPtr_t pConstChunks;
	ConstRtSegVecRefPtr_t pConstSegments;
	pConstChunks = pDiskSubset;
	pConstSegments = pRamSubset;
	return { pConstChunks, pConstSegments };
}
// FIXME! missing MVA, index_exact_words support
// FIXME? any chance to factor out common backend agnostic code?
// FIXME? do we need to support pExtraFilters?
// Execute a query against this RT index: disk chunks first (possibly in parallel),
// then the RAM segments, collecting everything into the provided sorters. On
// success the sorters are transformed to standalone schemas so the result set
// survives past the index guard.
bool RtIndex_c::MultiQuery ( CSphQueryResult & tResult, const CSphQuery & tQuery, const VecTraits_T<ISphMatchSorter *> & dAllSorters, const CSphMultiQueryArgs & tArgs ) const
{
	// to avoid the checking of a ppSorters's element for NULL on every next step,
	// just filter out all nulls right here
	CSphVector<ISphMatchSorter*> dSorters;
	dSorters.Reserve ( dAllSorters.GetLength() );
	dAllSorters.Apply ([&dSorters] ( ISphMatchSorter* p ) { if ( p ) dSorters.Add(p); });

	auto& tMeta = *tResult.m_pMeta;

	// if we have anything to work with
	if ( dSorters.IsEmpty() )
	{
		tMeta.m_iQueryTime = 0;
		return false;
	}

	assert ( tArgs.m_iTag==0 );

	MEMORY ( MEM_RT_QUERY );

	// start counting
	tMeta.m_iQueryTime = 0;
	int64_t tmQueryStart = sphMicroTimer();
	auto tmCpuQueryStart = sphTaskCpuTimer();

	QueryProfile_c * pProfiler = tMeta.m_pProfile;
	CSphScopedProfile tProf ( pProfiler, SPH_QSTATE_DICT_SETUP );

	// force ext2 mode for them
	// FIXME! eliminate this const breakage
	const_cast<CSphQuery*> ( &tQuery )->m_eMode = SPH_MATCH_EXTENDED2;

	auto tRtData = RtData();

	// debug hack (don't use ram chunk in debug modeling mode)
	if_const( MODELING )
		tRtData.m_pSegs = new RtSegVec_c;

	// honor 'select .. from rt.0.2.4'-style chunk subsets, if any
	tRtData = FilterReaderChunks ( tRtData, tQuery.m_dIntSubkeys );
	RtGuard_t tGuard ( std::move ( tRtData ) );
	auto& dDiskChunks = tGuard.m_dDiskChunks;

	// wrappers
	DictRefPtr_c pDict = GetStatelessDict ( m_pDict );

	if ( m_bKeywordDict && IsStarDict ( m_bKeywordDict ) )
		SetupStarDictV8 ( pDict );

	if ( m_tSettings.m_bIndexExactWords )
		SetupExactDict ( pDict );

	const QueryParser_i * pQueryParser = tQuery.m_pQueryParser;
	assert ( pQueryParser );
	const bool bFullscan = pQueryParser->IsFullscan ( tQuery );

	// calculate local idf for RT with disk chunks
	// in case of local_idf set but no external hash no full-scan query and RT has disk chunks
	const SmallStringHash_T<int64_t> * pLocalDocs = tArgs.m_pLocalDocs;
	SmallStringHash_T<int64_t> hLocalDocs;
	int64_t iTotalDocs = ( tArgs.m_iTotalDocs ? tArgs.m_iTotalDocs : m_tStats.m_iTotalDocuments );
	// already might local df calculated and set by distributed index
	bool bGotLocalDF = ( tArgs.m_bLocalDF && tArgs.m_pLocalDocs );

	// if not explicitly disbled lets calculate local_idf per disk chunks if it not was already calculated per distributed index
	if ( !bGotLocalDF && !bFullscan && tQuery.m_eRanker!=SPH_RANK_NONE && tQuery.m_bLocalDF.value_or ( true ) && dDiskChunks.GetLength()>1 )
	{
		SwitchProfile ( pProfiler, SPH_QSTATE_LOCAL_DF );
		GetKeywordsSettings_t tSettings;
		tSettings.m_bStats = true;
		// do not want to expand keywords and fold back its statistics as it could take too much time
		tSettings.m_bAllowExpansion = false;
		CSphVector < CSphKeywordInfo > dKeywords;
		DoGetKeywords ( dKeywords, tQuery.m_sQuery.cstr(), tSettings, false, nullptr, tGuard );
		for ( auto & tKw : dKeywords )
			if ( !hLocalDocs.Exists ( tKw.m_sNormalized ) ) // skip dupes
				hLocalDocs.Add ( tKw.m_iDocs, tKw.m_sNormalized );

		pLocalDocs = &hLocalDocs;
		iTotalDocs = GetStats().m_iTotalDocuments;
		bGotLocalDF = true;
	}

	SwitchProfile ( pProfiler, SPH_QSTATE_INIT );

	// FIXME! each result will point to its own MVA and string pools

	//////////////////////
	// search disk chunks
	//////////////////////

	tMeta.m_bHasPrediction = tQuery.m_iMaxPredictedMsec>0;

	MiniTimer_c dTimerGuard;
	int64_t tmMaxTimer = dTimerGuard.Engage ( tQuery.m_uMaxQueryMsec ); // max_query_time

	SorterSchemaTransform_c tSSTransform ( dDiskChunks.GetLength(), tArgs.m_bFinalizeSorters );

	if ( !dDiskChunks.IsEmpty() )
	{
		if ( !QueryDiskChunks ( tQuery, tMeta, tArgs, tGuard, dSorters, pProfiler, bGotLocalDF, pLocalDocs, iTotalDocs, GetName(), tSSTransform, tmMaxTimer ) )
			return false;
	}

	////////////////////
	// search RAM chunk
	////////////////////

	SwitchProfile ( pProfiler, SPH_QSTATE_INIT );

	// select the sorter with max schema
	// uses GetAttrsCount to get working facets (was GetRowSize)
	int iMaxSchemaIndex, iMatchPoolSize;
	std::tie ( iMaxSchemaIndex, iMatchPoolSize ) = GetMaxSchemaIndexAndMatchCapacity ( dSorters );

	if ( iMaxSchemaIndex==-1 )
		return false;

	const ISphSchema & tMaxSorterSchema = *( dSorters[iMaxSchemaIndex]->GetSchema ());
	auto dSorterSchemas = SorterSchemas ( dSorters, iMaxSchemaIndex );

	// setup calculations and result schema
	CSphQueryContext tCtx ( tQuery );
	tCtx.m_pProfile = pProfiler;
	tCtx.m_pLocalDocs = pLocalDocs;
	tCtx.m_iTotalDocs = iTotalDocs;
	tCtx.m_uPackedFactorFlags = tArgs.m_uPackedFactorFlags;

	// setup search terms
	RtQwordSetup_t tTermSetup ( tGuard );
	tTermSetup.SetDict ( pDict );
	tTermSetup.m_pIndex = this;
	tTermSetup.m_iDynamicRowitems = tMaxSorterSchema.GetDynamicSize();
	tTermSetup.m_iMaxTimer = dTimerGuard.Engage ( tQuery.m_uMaxQueryMsec ); // max_query_time
	tTermSetup.m_pWarning = &tMeta.m_sWarning;
	tTermSetup.SetSegment ( -1 );
	tTermSetup.m_pCtx = &tCtx;
	tTermSetup.m_bHasWideFields = ( m_tSchema.GetFieldsCount()>32 );

	// setup prediction constrain
	CSphQueryStats tQueryStats;
	int64_t iNanoBudget = (int64_t)(tQuery.m_iMaxPredictedMsec) * 1000000; // from milliseconds to nanoseconds
	tQueryStats.m_pNanoBudget = &iNanoBudget;
	if ( tMeta.m_bHasPrediction )
		tTermSetup.m_pStats = &tQueryStats;

	// bind weights
	tCtx.BindWeights ( tQuery, m_tSchema, tMeta.m_sWarning );

	// apply the field filter (if any) to the query text before parsing
	CSphVector<BYTE> dFiltered;
	const BYTE * sModifiedQuery = (const BYTE *)tQuery.m_sQuery.cstr();

	FieldFilterOptions_t tFFOptions { tQuery.m_eJiebaMode };
	if ( m_pFieldFilter && sModifiedQuery && m_pFieldFilter->Clone ( &tFFOptions )->Apply ( sModifiedQuery, dFiltered, true ) )
		sModifiedQuery = dFiltered.Begin();

	// parse query
	SwitchProfile ( pProfiler, SPH_QSTATE_PARSE );

	XQQuery_t tParsed;
	// FIXME!!! provide segments list instead index to tTermSetup.m_pIndex

	CSphScopedPayload tPayloads;

	// FIXME!!! add proper
	// - qcache invalidation after INSERT \ DELETE \ UPDATE and for plain index afte UPDATE #256
	// - qcache duplicates removal from killed document at segment #263
	tCtx.m_bSkipQCache = true;

	int iStackNeed = -1;
	// no need to create ranker, etc if there's no query
	if ( !bFullscan )
	{
		assert ( m_pQueryTokenizer.Ptr() && m_pQueryTokenizerJson.Ptr() );
		if ( !pQueryParser->ParseQuery ( tParsed, (const char *)sModifiedQuery, &tQuery, m_pQueryTokenizer, m_pQueryTokenizerJson, &m_tSchema, pDict, m_tSettings, &m_tMorphFields ) )
		{
			tMeta.m_sError = tParsed.m_sParseError;
			iStackNeed = 0;
		} else
		{
			iStackNeed = PrepareFTSearch ( this, IsStarDict ( m_bKeywordDict ), m_bKeywordDict, m_tMutableSettings.m_iExpandKeywords, m_iExpansionLimit, m_tSettings, tQuery,(cRefCountedRefPtrGeneric_t) tGuard.m_tSegmentsAndChunks.m_pSegs, pDict, tMeta, pProfiler, &tPayloads, tParsed );
		}
	}

	// empty index, empty result. Must be AFTER PrepareFTSearch, since it prepares list of words
	if ( tGuard.m_dRamSegs.IsEmpty() )
	{
		// still transform the sorters so disk-chunk matches become standalone
		for ( auto i : dSorters )
			tSSTransform.Transform ( i, tGuard );

		tResult.m_pDocstore = m_tSchema.HasStoredFields () ? this : nullptr;
		tMeta.m_iQueryTime = 0;
		return true;
	}

	// iStackNeed==0 signals a parse/prepare error (already stored in tMeta.m_sError)
	if ( !iStackNeed )
		return false;

	auto& tSess = session::Info();
	tSess.m_pSessionOpaque1 = (void*)(const DocstoreReader_i*)this;
	tSess.m_pSessionOpaque2 = nullptr;

	bool bParsedFullscan = bFullscan || pQueryParser->IsFullscan(tParsed);
	CSphVector<CSphFilterSettings> dTransformedFilters; // holds filter settings if they were modified. filters hold pointers to those settings
	CSphVector<FilterTreeItem_t> dTransformedFilterTree;
	if ( !SetupFilters ( tQuery, tMaxSorterSchema, m_tSchema, bParsedFullscan, tCtx, dTransformedFilters, dTransformedFilterTree, dSorterSchemas, tMeta ) )
		return false;

	// dispatch to the fullscan or the full-text branch
	bool bResult;
	if ( bParsedFullscan )
		bResult = DoFullScanQuery ( tGuard.m_dRamSegs, tMaxSorterSchema, m_tSchema, tQuery, tArgs, m_iStride, tmMaxTimer, pProfiler, tCtx, dSorters, tMeta );
	else
	{
		CSphMultiQueryArgs tFTArgs ( tArgs.m_iIndexWeight );
		tFTArgs.m_bFinalizeSorters = tArgs.m_bFinalizeSorters;
		tMeta.m_bBigram = ( m_tSettings.m_eBigramIndex!=SPH_BIGRAM_NONE );
		bResult = DoFullTextSearch ( tGuard.m_dRamSegs, tMaxSorterSchema, m_tSchema, tQuery, GetName(), tFTArgs, iMatchPoolSize, iStackNeed, tTermSetup, pProfiler, tCtx, dSorters, tParsed, tMeta, dSorters.GetLength()==1 ? dSorters[0] : nullptr );
	}

	if (!bResult)
		return false;

	MEMORY ( MEM_RT_RES_STRINGS );

	SwitchProfile ( pProfiler, SPH_QSTATE_DYNAMIC );

	// create new standalone schema for sorters (independent of any external indexes/pools/storages)
	// modify matches inside the sorters to work with the new schema
	for ( auto i : dSorters )
		tSSTransform.Transform ( i, tGuard );

	if ( tMeta.m_bHasPrediction )
		tMeta.m_tStats.Add ( tQueryStats );

	tResult.m_pDocstore = m_tSchema.HasStoredFields() ? this : nullptr;
	tMeta.m_iQueryTime = int ( ( sphMicroTimer()-tmQueryStart )/1000 );
	tMeta.m_iCpuTime += sphTaskCpuTimer ()-tmCpuQueryStart;
	return true;
}
// Append one keyword entry to dKeywords; when bGetStats is set, also collect
// docs/hits statistics for it over all given RAM segments. Words unknown to
// the dictionary are silently skipped.
void RtIndex_c::AddKeywordStats ( BYTE * sWord, const BYTE * sTokenized, const DictRefPtr_c& pDict, bool bGetStats, int iQpos, RtQword_t * pQueryWord, CSphVector <CSphKeywordInfo> & dKeywords, const RtSegVec_c& dRamSegs ) const
{
	assert ( !bGetStats || pQueryWord );

	SphWordID_t uWordID = pDict->GetWordID ( sWord );
	if ( !uWordID )
		return; // not in the dictionary - no entry at all

	if ( bGetStats )
	{
		// accumulate docs/hits over every RAM segment
		pQueryWord->Reset();
		pQueryWord->m_uWordID = uWordID;
		pQueryWord->m_sWord = (const char *)sTokenized;
		pQueryWord->m_sDictWord = (const char *)sWord;
		for ( const auto& pSeg : dRamSegs )
			RtQwordSetupSegment ( pQueryWord, pSeg, false );
	}

	CSphKeywordInfo & tKeyword = dKeywords.Add();
	tKeyword.m_sTokenized = (const char *)sTokenized;
	tKeyword.m_sNormalized = (const char*)sWord;
	tKeyword.m_iQpos = iQpos;
	if ( bGetStats )
	{
		tKeyword.m_iDocs = pQueryWord->m_iDocs;
		tKeyword.m_iHits = pQueryWord->m_iHits;
	} else
	{
		tKeyword.m_iDocs = 0;
		tKeyword.m_iHits = 0;
	}

	// strip internal dictionary markers from the normalized form
	RemoveDictSpecials ( tKeyword.m_sNormalized, ( m_tSettings.m_eBigramIndex!=SPH_BIGRAM_NONE ) );
}
// ISphQueryFilter implementation for the RT index: folds per-keyword docs/hits
// statistics (gathered from the given RAM segments) into the keywords list.
struct CSphRtQueryFilter : public ISphQueryFilter, public ISphNoncopyable
{
	const RtIndex_c * m_pIndex;		// index to collect stats from (not owned)
	RtQword_t * m_pQword;			// scratch query word reused for every keyword (not owned)
	bool m_bGetStats = false;		// NOTE(review): appears unused here; stats flag comes from m_tFoldSettings.m_bStats below
	const RtSegVec_c& m_tGuard;		// RAM segments pinned by the caller's guard

	CSphRtQueryFilter ( const RtIndex_c * pIndex, RtQword_t * pQword, const RtSegVec_c& tGuard )
		: m_pIndex ( pIndex )
		, m_pQword ( pQword )
		, m_tGuard ( tGuard )
	{}

	// keyword enumeration callback: delegate to the index to fill stats and append the entry
	void AddKeywordStats ( BYTE * sWord, const BYTE * sTokenized, int iQpos, CSphVector <CSphKeywordInfo> & dKeywords ) final
	{
		assert ( m_pIndex && m_pQword );
		m_pIndex->AddKeywordStats ( sWord, sTokenized, m_pDict, m_tFoldSettings.m_bStats, iQpos, m_pQword, dKeywords, m_tGuard );
	}
};
// Shared worker behind GetKeywords() and FillKeywords().
// bFillOnly=false: tokenizes sQuery, optionally expands wildcards, and fills dKeywords
//                  with the resulting terms (plus stats when tSettings.m_bStats is set).
// bFillOnly=true:  keeps the terms already present in dKeywords and only refreshes
//                  their docs/hits counters.
// Stats are gathered over the RAM segments and disk chunks pinned by the caller's tGuard.
// Returns false (setting *pError when provided) only on token-filter plugin setup failure.
bool RtIndex_c::DoGetKeywords ( CSphVector<CSphKeywordInfo> & dKeywords, const char * sQuery, const GetKeywordsSettings_t & tSettings, bool bFillOnly, CSphString * pError, const RtGuard_t& tGuard ) const
{
	if ( !bFillOnly )
		dKeywords.Resize ( 0 );

	// nothing to do: no terms to refresh, or no query text to tokenize
	if ( ( bFillOnly && !dKeywords.GetLength() ) || ( !bFillOnly && ( !sQuery || !sQuery[0] ) ) )
		return true;

	RtQword_t tQword;

	TokenizerRefPtr_c pTokenizer = m_pQueryTokenizer->Clone ( SPH_CLONE );
	pTokenizer->EnableTokenizedMultiformTracking ();

	// need to support '*' and '=' but not the other specials
	// so m_pQueryTokenizer does not work for us, gotta clone and setup one manually
	DictRefPtr_c pDict = GetStatelessDict ( m_pDict );

	if ( IsStarDict ( m_bKeywordDict ) )
	{
		SetupStarTokenizer ( pTokenizer );
		if ( m_bKeywordDict )
			SetupStarDictV8 ( pDict );
	}

	if ( m_tSettings.m_bIndexExactWords )
	{
		SetupExactTokenizer ( pTokenizer );
		SetupExactDict ( pDict );
	}

	// optional index_token_filter plugin
	if ( !m_tSettings.m_sIndexTokenFilter.IsEmpty() )
	{
		CSphString sError;
		Tokenizer::AddPluginFilterTo ( pTokenizer, m_tSettings.m_sIndexTokenFilter, sError );
		// fixed: previously tested pError->IsEmpty() (the caller's out-param) instead of
		// the local sError filled by AddPluginFilterTo, so plugin failures went unnoticed
		if ( !sError.IsEmpty() )
		{
			if ( pError )
				*pError = sError;
			return false;
		}
		if ( !pTokenizer->SetFilterSchema ( m_tSchema, sError ) )
		{
			if ( pError )
				*pError = sError;
			return false;
		}
	}

	// run the query text through the field filter (if any) before tokenizing
	CSphVector<BYTE> dFiltered;
	const BYTE * sModifiedQuery = (const BYTE *)sQuery;
	FieldFilterOptions_t tFFOptions { tSettings.m_eJiebaMode };
	if ( m_pFieldFilter && sQuery && m_pFieldFilter->Clone ( &tFFOptions )->Apply ( sModifiedQuery, dFiltered, true ) )
		sModifiedQuery = dFiltered.Begin();

	// FIXME!!! missing bigram

	bool bHasWildcards = false;
	if ( !bFillOnly )
	{
		// full pass: tokenize, expand wildcards, collect terms (and stats over RAM segments)
		ExpansionContext_t tExpCtx;
		// query defined options
		tExpCtx.m_iExpansionLimit = tSettings.m_iExpansionLimit ? tSettings.m_iExpansionLimit : m_iExpansionLimit;
		tExpCtx.m_bAllowExpansion = ( tSettings.m_bAllowExpansion && m_bKeywordDict && IsStarDict ( m_bKeywordDict ) );
		bool bExpandWildcards = ( tExpCtx.m_bAllowExpansion && !tSettings.m_bFoldWildcards );

		pTokenizer->SetBuffer ( sModifiedQuery, (int)strlen ( (const char*)sModifiedQuery ) );

		CSphRtQueryFilter tAotFilter ( this, &tQword, tGuard.m_dRamSegs );
		tAotFilter.m_pTokenizer = std::move ( pTokenizer );
		tAotFilter.m_pDict = std::move ( pDict );
		tAotFilter.m_pSettings = &m_tSettings;
		tAotFilter.m_tFoldSettings = tSettings;
		tAotFilter.m_tFoldSettings.m_bFoldWildcards = !bExpandWildcards;

		tExpCtx.m_pWordlist = this;
		tExpCtx.m_iMinPrefixLen = m_tSettings.GetMinPrefixLen ( m_bKeywordDict );
		tExpCtx.m_iMinInfixLen = m_tSettings.m_iMinInfixLen;
		tExpCtx.m_bHasExactForms = ( m_pDict->HasMorphology() || m_tSettings.m_bIndexExactWords );
		tExpCtx.m_bMergeSingles = false;
		tExpCtx.m_pIndexData = tGuard.m_tSegmentsAndChunks.m_pSegs;

		tAotFilter.GetKeywords ( dKeywords, tExpCtx );
		bHasWildcards = tExpCtx.m_bHasWildcards;
	} else
	{
		// refresh-only pass: re-hash each already-tokenized term and re-count docs/hits
		// over the RAM segments
		BYTE sWord[SPH_MAX_KEYWORD_LEN];

		ARRAY_FOREACH ( i, dKeywords )
		{
			CSphKeywordInfo & tInfo = dKeywords[i];
			int iLen = tInfo.m_sTokenized.Length();
			memcpy ( sWord, tInfo.m_sTokenized.cstr(), iLen );
			sWord[iLen] = '\0';

			SphWordID_t iWord = pDict->GetWordID ( sWord );
			if ( iWord )
			{
				tQword.Reset();
				tQword.m_uWordID = iWord;
				tQword.m_sWord = tInfo.m_sTokenized;
				tQword.m_sDictWord = (const char *)sWord;
				for ( const auto& pSeg : tGuard.m_dRamSegs )
					RtQwordSetupSegment ( &tQword, pSeg, false );

				tInfo.m_iDocs += tQword.m_iDocs;
				tInfo.m_iHits += tQword.m_iHits;
			}
		}
	}

	// process disk chunks too but only if:
	// - need term stats
	// - has terms with wildcards as these are expanded differently
	if ( !tSettings.m_bStats && !bHasWildcards )
		return true;

	if ( bFillOnly )
	{
		for ( auto& pChunk : tGuard.m_dDiskChunks )
			pChunk->Cidx().FillKeywords ( dKeywords );
	} else
	{
		// bigram and expanded might differs need to merge infos
		int iWasKeywords = dKeywords.GetLength();
		CSphVector<CSphKeywordInfo> dChunkKeywords;
		for ( auto& pChunk: tGuard.m_dDiskChunks )
		{
			pChunk->Cidx().GetKeywords ( dChunkKeywords, (const char*)sModifiedQuery, tSettings, pError );
			dKeywords.Append ( dChunkKeywords );
			dChunkKeywords.Resize ( 0 );
		}

		// merge keywords from RAM parts with disk keywords
		if ( iWasKeywords!=dKeywords.GetLength() )
			UniqKeywords ( dKeywords );
	}

	return true;
}
// public entry: tokenize sQuery and collect keywords (with optional stats) into dKeywords.
// the guard temporary keeps RAM segments and disk chunks pinned for the whole call.
bool RtIndex_c::GetKeywords ( CSphVector<CSphKeywordInfo> & dKeywords, const char * sQuery, const GetKeywordsSettings_t & tSettings, CSphString * pError ) const
{
	return DoGetKeywords ( dKeywords, sQuery, tSettings, false, pError, RtGuard() );
}
// public entry: refresh docs/hits counters of already-tokenized keywords in dKeywords.
// default settings; no query text and no error reporting are needed in this mode.
bool RtIndex_c::FillKeywords ( CSphVector<CSphKeywordInfo> & dKeywords ) const
{
	return DoGetKeywords ( dKeywords, nullptr, GetKeywordsSettings_t(), true, nullptr, RtGuard() );
}
// for each RamSegment collect list of rows and indexes in update ctx
// Collect, for one RAM segment, the alive rows that still need this update:
// each entry pairs the segment-local rowid with the doc's index inside the update context.
RowsToUpdateData_t CollectUpdatableRows ( UpdateContext_t & tCtx, const ConstRtSegmentRefPtf_t & tSegment ) noexcept
{
	RowsToUpdateData_t dRows;
	const auto & dDocids = tCtx.m_tUpd.m_pUpdate->m_dDocids;
	int iDocs = dDocids.GetLength();
	for ( int i = 0; i<iDocs; ++i )
	{
		// skip docs already applied by a previously visited segment/chunk
		if ( tCtx.m_tUpd.m_dUpdated.BitGet ( i ) )
			continue;

		auto tRowID = tSegment->GetAliveRowidByDocid ( dDocids[i] );
		if ( tRowID==INVALID_ROWID )
			continue;

		auto & tEntry = dRows.Add();
		tEntry.m_tRow = tRowID;
		tEntry.m_iIdx = i;
	}
	return dRows;
}
// when RAM segment is used in merge or saving to disk - it's data finally became merged into new segment or chunk,
// and source segment then unlinked. If during that operation attributes was updated by aside 'update attributes' call,
// the changes will not participate in final result and so, will be discarded with source segment unlink.
// to deal with this, we set 'dirty' bit on segment to show that updates to it must be also collected and then applied
// to final resulting chunk/segment. That bit set before merging attributes and exists till the end of segment's lifetime.
// Here is first part of postponed merge - after update we collect docs updated in segment and store them into vec of
// updates (as it might happen be more than one update during the operation)
// If this segment is currently being merged/saved (m_bAttrsBusy set), record which of the
// just-applied rows were updated, so the update can be replayed onto the resulting
// chunk/segment later. No-op when the segment is not busy.
void RtSegment_t::MaybeAddPostponedUpdate ( const RowsToUpdate_t& dRows, const UpdateContext_t& tCtx )
{
	// NOTE(review): trace label names IndexSegment_c, not RtSegment_t — looks stale; confirm
	TRACE_CORO ( "rt", "IndexSegment_c::Update_UpdateAttributes");
	if ( !m_bAttrsBusy.load ( std::memory_order_acquire ) )
		return;

	// segment is now saving/merging - add postponed update.
	auto& tUpd = tCtx.m_tUpd;

	// count exact N of affected rows (no need to waste space for reserve in this route at all)
	auto iRows = dRows.count_of ( [&tUpd] ( auto& i ) { return tUpd.m_dUpdated.BitGet ( i.m_iIdx ); } );
	if ( !iRows )
		return;

	// keep a reference to the update blob itself plus the indexes of the rows it touched here
	auto& tNewPostponedUpdate = m_dPostponedUpdates.Add();
	tNewPostponedUpdate.m_pUpdate = MakeReusableUpdate ( tUpd.m_pUpdate );
	tNewPostponedUpdate.m_dRowsToUpdate.Reserve ( iRows );

	// collect indexes of actually updated rows and docids
	dRows.for_each ( [&tUpd, &tNewPostponedUpdate] ( const auto& i ) {
		if ( tUpd.m_dUpdated.BitGet ( i.m_iIdx ) )
			tNewPostponedUpdate.m_dRowsToUpdate.Add().m_iIdx = i.m_iIdx;
	});
}
// Apply the (remaining part of the) attribute update to each disk chunk in turn.
// Runs in the serial-chunk-access fiber; stops early once every doc of the update is applied.
// Returns false if any chunk-level update reports an error.
bool RtIndex_c::Update_DiskChunks ( AttrUpdateInc_t& tUpd, const DiskChunkSlice_t& dDiskChunks, CSphString & sError ) REQUIRES ( m_tWorkers.SerialChunkAccess() )
{
	TRACE_CORO ( "rt", "RtIndex_c::Update_DiskChunks" );
	assert ( Coro::CurrentScheduler() == m_tWorkers.SerialChunkAccess() );
	bool bCritical = false;
	CSphString sWarning;

	bool bEnabled = m_tSaving.ActiveStateIs ( SaveState_c::ENABLED );
	// if saving is currently disabled we may have to wait (once) before touching a chunk
	bool bNeedWait = !bEnabled;
	for ( auto& pDiskChunk : dDiskChunks )
	{
		if ( tUpd.AllApplied () )
			break;

		// advertise a pending update so the merger (E_MERGEATTRS_PULSE) can yield its rlock to us
		pDiskChunk->m_iPendingUpdates.fetch_add ( 1, std::memory_order_relaxed );
		AT_SCOPE_EXIT ( [pDiskChunk] { pDiskChunk->m_iPendingUpdates.fetch_sub ( 1, std::memory_order_relaxed ); } );

		// acquire fine-grain lock
		BEGIN_CORO ( "wait", "disk-chunk w-lock");
		SccWL_t wLock ( pDiskChunk->m_tLock );
		END_CORO ( "wait" );

		// if saving is disabled, and we NEED to actually update a disk chunk,
		// we'll pause that action, waiting until index is unlocked.
		if ( bNeedWait )
		{
			bNeedWait = false;
			BEGIN_CORO ( "wait", "WaitEnabledOrShutdown" );
			bEnabled = m_tSaving.WaitEnabledOrShutdown ();
			END_CORO ( "wait" );
		}

		// iRes<0 means error (or shutdown while waiting above, in which case we skip the chunk)
		int iRes = bEnabled ? pDiskChunk->CastIdx().CheckThenUpdateAttributes ( tUpd, bCritical, sError, sWarning ) : -1;

		// FIXME! need to handle critical failures here (chunk is unusable at this point)
		assert ( !bCritical );

		// FIXME! maybe emit a warning to client as well?
		if ( iRes<0 )
			return false;

		// update stats
		m_uDiskAttrStatus |= pDiskChunk->Cidx().GetAttributeStatus();
	}
	return true;
}
// thread-safe, as segment is locked up level before calling RAM segment update
// Write an updated blob (MVA/string/json) for one row of a RAM segment.
// Small-enough blobs are overwritten in place; bigger ones are appended to the segment's
// blob pool and the row's blob offset is repointed (the old blob bytes become dead space
// until the segment is merged away). Never fails, so bCritical is always cleared.
// thread-safe, as segment is locked up level before calling RAM segment update
bool RtSegment_t::Update_WriteBlobRow ( UpdateContext_t & tCtx, RowID_t tRowID, ByteBlob_t tBlob,
	int nBlobAttrs, const CSphAttrLocator & tBlobRowLoc, bool & bCritical, CSphString & sError ) NO_THREAD_SAFETY_ANALYSIS
{
	// fixme! Ensure pSegment->m_tLock acquired exclusively...
	auto pDocinfo = tCtx.GetDocinfo ( tRowID );

	// locate the row's current blob inside the pool via the row's stored offset
	BYTE* pExistingBlob = m_dBlobs.begin() + sphGetRowAttr ( pDocinfo, tBlobRowLoc );
	DWORD uExistingBlobLen = sphGetBlobTotalLen ( pExistingBlob, nBlobAttrs );

	bCritical = false;

	// overwrite old record
	if ( (DWORD)tBlob.second<=uExistingBlobLen )
	{
		memcpy ( pExistingBlob, tBlob.first, tBlob.second );
		return true;
	}

	// new blob is bigger: append to the pool and repoint the row at the new offset
	int iPoolSize = m_dBlobs.GetLength();
	m_dBlobs.Append ( tBlob );
	sphSetRowAttr ( pDocinfo, tBlobRowLoc, iPoolSize );

	// update blob pool ptrs since they could have changed after the resize
	tCtx.m_pBlobPool = m_dBlobs.begin();
	return true;
}
// FIXME! might be inconsistent in case disk chunk update fails
// Apply an attribute update to this RT index: first to RAM segments, then (for docs not
// found there) to disk chunks, finally binlogging the update.
// Returns the number of rows updated by this call, or -1 on error (sError set).
// FIXME! might be inconsistent in case disk chunk update fails
int RtIndex_c::CheckThenUpdateAttributes ( AttrUpdateInc_t& tUpd, bool& bCritical, CSphString& sError, CSphString& sWarning )
{
	TRACE_CORO ( "rt", "CheckThenUpdateAttributes" );
	const auto& tUpdc = *tUpd.m_pUpdate;
	assert ( tUpdc.m_dRowOffset.IsEmpty() || tUpdc.m_dDocids.GetLength()==tUpdc.m_dRowOffset.GetLength() );

	// empty update, or everything already applied by an earlier index in a distributed call
	if ( tUpdc.m_dDocids.IsEmpty() || tUpd.AllApplied () )
		return 0;

	// FIXME!!! grab Writer lock to prevent segments retirement during commit(merge)
	if ( m_tRtChunks.IsEmpty() )
		return 0;

	// remember the running total so we can report only this call's contribution
	int iUpdated = tUpd.m_iAffected;

	if ( !Update_CheckAttributes ( *tUpd.m_pUpdate, m_tSchema, sError ) )
		return -1;

	UpdateContext_t tCtx ( tUpd, m_tSchema );
	tCtx.PrepareListOfUpdatedAttributes ( sError );

	// do update in serial fiber. That ensures no concurrency with set of chunks changing, however need to dispatch
	// with changers themselves (merge segments, merge chunks, save disk chunks).
	// fixme! Find another way (dedicated fiber?), as long op in serial fiber may pause another ops.
	ScopedScheduler_c tSerialFiber ( m_tWorkers.SerialChunkAccess() );

	auto tGuard = RtGuard ();
	// PauseCheck("rtpause"); // catch if something happened between RtGuard() and actual updates
	TRACE_CORO ( "rt", "UpdateAttributes_serial" );

	// pass 1: RAM segments (each under its own write-lock)
	for ( auto& dRamSegment : tGuard.m_dRamSegs )
	{
		auto dRamUpdateSet = CollectUpdatableRows ( tCtx, dRamSegment );
		if ( dRamUpdateSet.IsEmpty() )
			continue;

		auto* pSeg = const_cast<RtSegment_t*> ( (const RtSegment_t*) dRamSegment );

		BEGIN_CORO ( "wait", "ram-seg-wlock");
		SccWL_t wLock ( pSeg->m_tLock );
		END_CORO ( "wait" );
		assert ( pSeg->GetStride() == m_tSchema.GetRowSize() );

		// point context to target segment
		tCtx.m_pAttrPool = pSeg->m_dRows.begin();
		tCtx.m_pBlobPool = pSeg->m_dBlobs.begin();

		if ( !pSeg->Update_UpdateAttributes ( dRamUpdateSet, tCtx, bCritical, sError ) )
			return -1;

		// if the segment is mid-merge/save, remember the rows so the update survives the merge
		pSeg->MaybeAddPostponedUpdate( dRamUpdateSet, tCtx );

		if ( tUpd.AllApplied () )
			break;
	}

	// pass 2: disk chunks (failure is logged but the RAM-side changes are kept)
	if ( !Update_DiskChunks ( tUpd, tGuard.m_dDiskChunks, sError ) ) // fixme!
		sphWarn ( "INTERNAL ERROR: table %s update failure: %s", GetName(), sError.cstr() );

	// bump the counter, binlog the update!
	CommitUpdateAttributes ( &m_iTID, GetName(), tUpdc );

	iUpdated = tUpd.m_iAffected - iUpdated;

	if ( !tCtx.HandleJsonWarnings ( iUpdated, sWarning, sError ) )
		return -1;

	// all done
	return iUpdated;
}
// Flush updated attributes of every disk chunk to disk.
// Clears the dirty-status flag only if no new updates arrived while saving.
bool RtIndex_c::SaveAttributes ( CSphString & sError ) const
{
	// snapshot the status before saving; concurrent updates may change it meanwhile
	DWORD uWasStatus = m_uDiskAttrStatus;
	const auto& pChunks = m_tRtChunks.DiskChunks();
	if ( pChunks->IsEmpty() || m_tSaving.ActiveStateIs ( SaveState_c::DISCARD ) )
		return true;

	bool bOk = true;
	for ( auto& pDiskChunk : *pChunks )
		bOk &= pDiskChunk->Cidx().SaveAttributes ( sError );

	// reset the dirty flag only when nothing got updated during the save
	if ( m_uDiskAttrStatus==uWasStatus )
		m_uDiskAttrStatus = 0;

	return bOk;
}
// RAII guard: pauses OPTIMIZE for its lifetime and restores the previous 'stop'
// flag state on destruction, so guards can nest safely.
class OptimizeGuard_c final
{
	RtIndex_c & m_tIndex;
	const bool m_bPreviousOptimizeState;	// value StopOptimize() returned, i.e. the flag state before us

public:
	NONCOPYMOVABLE (OptimizeGuard_c);

	explicit OptimizeGuard_c ( RtIndex_c & tIndex )
		: m_tIndex { tIndex }, m_bPreviousOptimizeState { tIndex.StopOptimize () }
	{}

	~OptimizeGuard_c ()
	{
		// restore rather than clear: an outer guard may still want optimize stopped
		m_tIndex.m_bOptimizeStop.store ( m_bPreviousOptimizeState, std::memory_order_relaxed );
	}
};
// fixme! is NOT lru-safe, ensure index locked exclusively!
// (last is true as it w-locked before 'alter' called, however optimize call need special glance!)
// Rebuild every RAM segment's columnar storage under the new schema (ALTER of a columnar attr).
// NOTE(review): sAttrName/eAttrType are not read here — the old/new schema pair fully describes
// the change; presumably kept for signature symmetry with AddRemoveRowwiseAttr — confirm.
bool RtIndex_c::AddRemoveColumnarAttr ( RtGuard_t & tGuard, bool bAdd, const CSphString & sAttrName, ESphAttr eAttrType, const CSphSchema & tOldSchema, const CSphSchema & tNewSchema, CSphString & sError )
{
	for ( auto& pConstSeg : tGuard.m_dRamSegs )
	{
		auto* pSeg = const_cast<RtSegment_t*> ( pConstSeg.Ptr() );
		assert ( pSeg );

		// stream the segment's rows through a fresh builder, then swap in the rebuilt storage
		auto pBuilder = CreateColumnarBuilderRT ( tNewSchema );
		if ( !Alter_AddRemoveColumnar ( bAdd, tOldSchema, tNewSchema, pSeg->m_pColumnar.get(), pBuilder.get(), pSeg->m_uRows, GetFilebase(), sError ) )
			return false;

		pSeg->m_pColumnar = CreateColumnarRT ( tNewSchema, pBuilder.get() );
		pSeg->UpdateUsedRam();
	}

	return true;
}
// fixme! is NOT lru-safe, ensure index locked exclusively!
// (last is true as it w-locked before 'alter' called, however optimize call need special glance!)
// Rebuild every RAM segment's row storage (and, when blob attrs are involved, its blob pool)
// under the new schema (ALTER of a row-wise attr). Per-segment failures are logged, not returned.
void RtIndex_c::AddRemoveRowwiseAttr ( RtGuard_t & tGuard, bool bAdd, const CSphString & sAttrName, ESphAttr eAttrType, const CSphSchema & tOldSchema, const CSphSchema & tNewSchema, CSphString & sError )
{
	// did the old/new schemas contain any blob attrs at all?
	bool bHadBlobs = false;
	for ( int i = 0; i < tOldSchema.GetAttrsCount() && !bHadBlobs; ++i )
		bHadBlobs |= sphIsBlobAttr ( tOldSchema.GetAttr(i) );

	bool bHaveBlobs = false;
	for ( int i = 0; i < tNewSchema.GetAttrsCount() && !bHaveBlobs; ++i )
		bHaveBlobs |= sphIsBlobAttr ( tNewSchema.GetAttr(i) );

	bool bBlob = sphIsBlobAttr(eAttrType);
	bool bBlobsModified = bBlob && ( bAdd || bHaveBlobs==bHadBlobs );
	// NOTE(review): bBlobsModified implies bBlob, so the 'bBlob || bBlobsModified' check below
	// reduces to plain bBlob — looks redundant or possibly a mistyped condition; confirm intent.

	// now modify the ramchunk
	for ( auto& pRSeg : tGuard.m_dRamSegs )
	{
		assert ( pRSeg );

		// rebuild rows (and blobs) into fresh buffers, then swap them into the segment
		CSphTightVector<CSphRowitem> dSPA;
		CSphTightVector<BYTE> dSPB;
		std::unique_ptr<WriteWrapper_c> pSPAWriteWrapper = CreateWriteWrapperMem ( dSPA );
		std::unique_ptr<WriteWrapper_c> pSPBWriteWrapper = CreateWriteWrapperMem ( dSPB );
		dSPA.Reserve ( pRSeg->m_uRows * m_iStride );

		auto * pWSeg = const_cast<RtSegment_t*> ( pRSeg.Ptr() );
		SccWL_t _ (pWSeg->m_tLock);

		const CSphRowitem* pDocinfo = pWSeg->m_dRows.begin();
		dSPB.Reserve ( pWSeg->m_dBlobs.GetLength() / 2 ); // reserve half of our current blobs, just in case
		// NOTE(review): the call below does not receive sError; on failure the warning prints
		// whatever the caller left in sError — possibly stale text; confirm.
		if ( !Alter_AddRemoveRowwiseAttr ( tOldSchema, tNewSchema, pDocinfo, pRSeg->m_uRows, pWSeg->m_dBlobs.begin(), *pSPAWriteWrapper, *pSPBWriteWrapper, bAdd, sAttrName ) )
			sphWarning ( "%s attribute to %s: %s", bAdd ? "adding" : "removing", GetFilebase(), sError.cstr() );

		pWSeg->m_dRows.SwapData(dSPA);

		if ( bBlob || bBlobsModified )
			pWSeg->m_dBlobs.SwapData(dSPB);

		pRSeg->UpdateUsedRam();
	}
}
// fixme! Need fine-grain locking, not const_cast!
// Register a freshly added field in the RAM-chunk docstore machinery.
// Only stored fields touch the docstore; non-stored fields need no RAM-side changes here.
// fixme! Need fine-grain locking, not const_cast!
void RtIndex_c::AddFieldToRamchunk ( const CSphString & sFieldName, DWORD uFieldFlags, const CSphSchema & tOldSchema, const CSphSchema & tNewSchema )
{
	bool bStoredField = ( uFieldFlags & CSphColumnInfo::FIELD_STORED )!=0;
	if ( !bStoredField )
		return;

	// lazily create the field map on first stored field
	if ( !m_pDocstoreFields )
		m_pDocstoreFields = CreateDocstoreFields();
	assert ( m_pDocstoreFields );
	m_pDocstoreFields->AddField ( sFieldName, DOCSTORE_TEXT );

	AddRemoveFromRamDocstore ( tOldSchema, tNewSchema );
}
// Count schema entries (fields + attributes) that are kept in the docstore.
static int GetNumStored ( const CSphSchema & tSchema )
{
	int iTotal = 0;
	for ( int iField = 0, iFields = tSchema.GetFieldsCount(); iField<iFields; ++iField )
		iTotal += tSchema.IsFieldStored ( iField ) ? 1 : 0;

	for ( int iAttr = 0, iAttrs = tSchema.GetAttrsCount(); iAttr<iAttrs; ++iAttr )
		iTotal += tSchema.IsAttrStored ( iAttr ) ? 1 : 0;

	return iTotal;
}
// Rebuild (or drop) the per-segment docstores after the set of stored fields/attrs changed.
// No-op when the stored-entry count is unchanged.
void RtIndex_c::AddRemoveFromRamDocstore ( const CSphSchema & tOldSchema, const CSphSchema & tNewSchema )
{
	int iWasStored = GetNumStored ( tOldSchema );
	int iNowStored = GetNumStored ( tNewSchema );
	if ( iWasStored==iNowStored )
		return;

	// removal that leaves nothing stored at all -> just drop the docstores
	bool bDropAll = iNowStored<iWasStored && !tNewSchema.HasStoredFields() && !tNewSchema.HasStoredAttrs();

	auto pSegs = m_tRtChunks.RamSegs();
	for ( auto & pConstSeg : *pSegs )
	{
		auto * pSeg = const_cast<RtSegment_t*> ( pConstSeg.Ptr() );
		if ( bDropAll )
			pSeg->m_pDocstore.reset();
		else
		{
			// re-pack the old docstore contents under the new schema
			auto pRebuilt = CreateDocstoreRT();
			Alter_AddRemoveFromDocstore ( *pRebuilt, pSeg->m_pDocstore.get(), pSeg->m_uRows, tNewSchema );
			pSeg->m_pDocstore = std::move ( pRebuilt );
		}

		pSeg->UpdateUsedRam();
	}
}
// Scrub a removed field from the RAM chunk: docstore field map, per-segment
// dictionaries, and (via AddRemoveFromRamDocstore) the docstores themselves.
void RtIndex_c::RemoveFieldFromRamchunk ( const CSphString & sFieldName, const CSphSchema & tOldSchema, const CSphSchema & tNewSchema )
{
	// drop the field from the docstore field map, if it was registered there
	if ( m_pDocstoreFields && m_pDocstoreFields->GetFieldId ( sFieldName, DOCSTORE_TEXT )!=-1 )
		m_pDocstoreFields->RemoveField ( sFieldName, DOCSTORE_TEXT );

	int iField = tOldSchema.GetFieldIndex ( sFieldName.cstr () );

	// purge the field's hits from every RAM segment's dictionary
	auto pRamSegs = m_tRtChunks.RamSegs();
	for ( auto & pConstSeg : *pRamSegs )
	{
		auto* pSeg = const_cast<RtSegment_t*> ( pConstSeg.Ptr() );
		assert ( pSeg );
		DeleteFieldFromDict ( pSeg, iField );
	}

	AddRemoveFromRamDocstore ( tOldSchema, tNewSchema );
}
// ALTER TABLE ... ADD/DROP COLUMN for full-text fields.
// Updates the schema, then patches every disk chunk and the RAM chunk in place.
// Returns false only if the schema change itself is invalid; per-chunk failures are
// logged as warnings since in-memory changes cannot be rolled back.
bool RtIndex_c::AddRemoveField ( bool bAdd, const CSphString & sFieldName, DWORD uFieldFlags, CSphString & sError )
{
	// field ALTER must not race with OPTIMIZE merges
	OptimizeGuard_c tStopOptimize ( *this );

	// go to serial fiber: from here on chunk/segment structures are ours exclusively
	ScopedScheduler_c tSerialFiber ( m_tWorkers.SerialChunkAccess() );
	TRACE_SCHED ( "rt", "AddRemoveField" );

	CSphSchema tOldSchema = m_tSchema;
	CSphSchema tNewSchema = m_tSchema;
	if ( !Alter_AddRemoveFieldFromSchema ( bAdd, tNewSchema, sFieldName, uFieldFlags, sError ) )
		return false;

	m_tSchema = tNewSchema;

	auto tGuard = RtGuard();

	// modify the in-memory data of disk chunks
	// fixme: we can't rollback in-memory changes, so we just show errors here for now
	for ( auto& pChunk : tGuard.m_dDiskChunks )
		if ( !pChunk->CastIdx().AddRemoveField ( bAdd, sFieldName, uFieldFlags, sError ) )
			// fixed: message used to say "attribute" although this path alters a field
			sphWarning ( "%s field to %s.%d: %s", bAdd ? "adding" : "removing", GetFilebase(), pChunk->Cidx().m_iChunk, sError.cstr() );

	if ( bAdd )
		AddFieldToRamchunk ( sFieldName, uFieldFlags, tOldSchema, tNewSchema );
	else
		RemoveFieldFromRamchunk ( sFieldName, tOldSchema, tNewSchema );

	// fixme: we can't rollback at this point
	AlterSave ( true );

	return true;
}
// Persist the results of an ALTER: optionally flush the RAM chunk, rewrite the meta,
// flush the binlog and invalidate the query cache for this index.
void RtIndex_c::AlterSave ( bool bSaveRam )
{
	if ( bSaveRam )
		Verify ( SaveRamChunk () );

	SaveMeta ();

	// fixme: notify that it was ALTER that caused the flush
	Binlog::NotifyIndexFlush ( m_iTID, GetName(), Binlog::NoShutdown, Binlog::NoSave );

	// cached results may no longer match the altered schema
	QcacheDeleteIndex ( GetIndexId() );
}
// ALTER TABLE ... ADD/DROP COLUMN for attributes (row-wise or columnar).
// Updates the schema, then patches all disk chunks and RAM segments in place.
// Returns false on schema or columnar-rebuild failure; per-disk-chunk failures are only logged.
bool RtIndex_c::AddRemoveAttribute ( bool bAdd, const AttrAddRemoveCtx_t & tCtx, CSphString & sError )
{
	if ( !m_tRtChunks.DiskChunks()->IsEmpty() && !m_tSchema.GetAttrsCount() )
	{
		sError = "table must already have attributes";
		return false;
	}
	// here must be exclusively LOCKED access, we don't rely upon the topmost lock and go isolated ourselves

	// stop all optimize tasks
	OptimizeGuard_c tStopOptimize ( *this );

	// go to serial fiber.
	ScopedScheduler_c tSerialFiber ( m_tWorkers.SerialChunkAccess() );
	TRACE_SCHED ( "rt", "AddRemoveAttribute" );

	// wait all secondary service tasks (merge segments, save disk chunk) to finish and release all the segments.
	WaitRAMSegmentsUnlocked();

	// as we're in serial, here all index data exclusively belongs to us. No new commits, merges, flushes, etc. until
	// we're finished.

	// resolve the effective storage engine for the new attr (index default vs per-attr override)
	AttrAddRemoveCtx_t tNewCtx = tCtx;
	AttrEngine_e eAttrEngine = CombineEngines ( m_tSettings.m_eEngine, tCtx.m_eEngine );
	if ( eAttrEngine==AttrEngine_e::COLUMNAR )
		tNewCtx.m_uFlags |= CSphColumnInfo::ATTR_COLUMNAR;
	else
		tNewCtx.m_uFlags &= ~( CSphColumnInfo::ATTR_COLUMNAR_HASHES | CSphColumnInfo::ATTR_STORED );

	CSphSchema tOldSchema = m_tSchema;
	CSphSchema tNewSchema = m_tSchema;
	if ( !Alter_AddRemoveFromSchema ( tNewSchema, tNewCtx, bAdd, sError ) )
		return false;

	m_tSchema = tNewSchema;
	m_iStride = m_tSchema.GetRowSize();

	auto tGuard = RtGuard();

	// modify the in-memory data of disk chunks
	// fixme: we can't rollback in-memory changes, so we just show errors here for now
	for ( auto& pChunk : tGuard.m_dDiskChunks )
		if ( !pChunk->CastIdx().AddRemoveAttribute ( bAdd, tNewCtx, sError ) )
			sphWarning ( "%s attribute to %s.%d: %s", bAdd ? "adding" : "removing", GetFilebase(), pChunk->Cidx().m_iChunk, sError.cstr() );

	// the attr being added lives in the NEW schema; the one being removed only in the OLD one
	bool bColumnar = bAdd ? tNewSchema.GetAttr ( tNewCtx.m_sName.cstr() )->IsColumnar() : tOldSchema.GetAttr ( tNewCtx.m_sName.cstr() )->IsColumnar();
	if ( bColumnar )
	{
		if ( !AddRemoveColumnarAttr ( tGuard, bAdd, tNewCtx.m_sName, tNewCtx.m_eType, tOldSchema, tNewSchema, sError ) )
			return false;
	} else
		AddRemoveRowwiseAttr ( tGuard, bAdd, tNewCtx.m_sName, tNewCtx.m_eType, tOldSchema, tNewSchema, sError );

	// stored attrs live in the docstore too; adjust it if the stored set changed
	AddRemoveFromRamDocstore ( tOldSchema, tNewSchema );

	// fixme: we can't rollback at this point
	AlterSave ( true );

	return true;
}
//////////////////////////////////////////////////////////////////////////
// MAGIC CONVERSIONS
//////////////////////////////////////////////////////////////////////////
// ClientSession_c::Execute->HandleMysqlAttach->AttachDiskIndex
// ATTACH a plain disk index to this RT index: the disk index becomes a new disk chunk.
// On success this index takes ownership of pIndex. bFatal is set when a failure left
// the source index damaged (rename rollback failed).
// ClientSession_c::Execute->HandleMysqlAttach->AttachDiskIndex
bool RtIndex_c::AttachDiskIndex ( CSphIndex * pIndex, bool bTruncate, bool & bFatal, CSphString & sError )
{
	// from the next line we work in index simple scheduler. That made everything much simpler
	// (no need to care about locks and order of access to ram segments and disk chunks)
	ScopedScheduler_c tSerialFiber ( m_tWorkers.SerialChunkAccess() );
	TRACE_SCHED ( "rt", "AttachDiskIndex" );

	bFatal = false;
	if ( bTruncate && !Truncate ( sError, TRUNCATE ) )
		return false;

	// safeguards
	// we do not support some disk index features in RT just yet
	if ( !CanAttach ( pIndex, sError ) )
		return false;

	// note: that is important. Active readers prohibited by topmost w-lock, but internal processes not!
	// fixme! Looks like a dupe, look at few lines above
	if ( bTruncate && !Truncate ( sError, TRUNCATE ) )
		return false;

	// attach to non-empty RT: first flush RAM segments to disk chunk, then apply upcoming index'es docs as k-list.
	if ( !m_tRtChunks.RamSegs()->IsEmpty() && !SaveDiskChunk ( true ) )
		return false;

	// kill our duplicates of its docs and rename its files to the next chunk slot
	if ( !AttachDiskChunkMove ( pIndex, bFatal, sError ) )
		return false;

	// FIXME? what about copying m_TID etc?

	{ // update disk chunk list
		auto tNewSet = RtWriter();
		tNewSet.InitDiskChunks ( RtWriter_c::copy );
		tNewSet.m_pNewDiskChunks->Add ( DiskChunk_c::make ( pIndex ) );
	}

	// adopt the attached index's schema/tokenizer/dict settings as our own
	AttachSetSettings ( pIndex );
	PostSetup();

	// resave header file
	SaveMeta();

	// FIXME? do something about binlog too?
	// Binlog::NotifyIndexFlush ( GetName(), m_iTID, false );

	// all done, reset cache
	QcacheDeleteIndex ( GetIndexId() );
	QcacheDeleteIndex ( pIndex->GetIndexId() );
	return true;
}
// Check whether pIndex can be ATTACHed to this RT index.
// An empty RT index accepts any donor; a non-empty one requires matching
// tokenizer, dictionary and attribute declarations.
bool RtIndex_c::CanAttach ( const CSphIndex * pIndex, CSphString & sError ) const
{
	// ATTACH to exist index require these checks
	if ( m_tRtChunks.IsEmpty() )
		return true;

	if ( m_pTokenizer->GetSettingsFNV()!=pIndex->GetTokenizer()->GetSettingsFNV() )
	{
		sError = "ATTACH currently requires same tokenizer settings (RT-side support not implemented yet)";
		return false;
	}

	if ( m_pDict->GetSettingsFNV()!=pIndex->GetDictionary()->GetSettingsFNV() )
	{
		sError = "ATTACH currently requires same dictionary settings (RT-side support not implemented yet)";
		return false;
	}

	if ( !GetMatchSchema().CompareTo ( pIndex->GetMatchSchema(), sError, true ) )
	{
		sError = "ATTACH currently requires same attributes declaration (RT-side support not implemented yet)";
		return false;
	}

	return true;
}
// Turn an external index into our next disk chunk: kill our duplicates of its docs,
// rename its files into our chunk naming scheme, and fix up stats/name/chunk id.
// Sets bFatal when the rename failed AND could not be rolled back (source damaged).
bool RtIndex_c::AttachDiskChunkMove ( CSphIndex * pIndex, bool & bFatal, CSphString & sError ) REQUIRES ( m_tWorkers.SerialChunkAccess() )
{
	int iTotalKilled = 0;

	// attach to non-empty RT: apply upcoming index'es docs as k-list.
	if ( !m_tRtChunks.IsEmpty() )
	{
		auto dIndexDocs = pIndex->BuildDocList();
		if ( TlsMsg::HasErr () )
		{
			sError.SetSprintf ( "ATTACH failed, %s", TlsMsg::szError () );
			return false;
		}
		// kill our copies of docs which the incoming index also has
		iTotalKilled = ApplyKillList ( dIndexDocs );
	}

	// rename that source index to our last chunk
	int iChunk = m_tChunkID.MakeChunkId ( m_tRtChunks );
	auto eRenamed = pIndex->RenameEx ( GetFilename ( iChunk ) );
	switch (eRenamed)
	{
	case RE_FATAL: // not just failed, but also rollback wasn't success. Source index is damaged!
		bFatal = true;
		// no break; intentional fallthrough: fatal is also a failure
	case RE_FAIL:
		sError.SetSprintf ( "ATTACH failed, %s", pIndex->GetLastError().cstr() );
		return false;
	default: break;
	}

	// account the incoming docs (minus the ones we just killed) into our stats
	m_tStats.m_iTotalBytes += pIndex->GetStats().m_iTotalBytes;
	m_tStats.m_iTotalDocuments += pIndex->GetStats().m_iTotalDocuments-iTotalKilled;

	pIndex->SetName ( SphSprintf ( "%s_%d", GetName(), iChunk ) ); // idx name is cosmetic thing
	pIndex->SetBinlog ( false );
	pIndex->m_iChunk = iChunk;
	return true;
}
// After a successful ATTACH: adopt the attached index's schema, settings,
// tokenizer and dictionary as this RT index's own.
void RtIndex_c::AttachSetSettings ( CSphIndex * pIndex )
{
	// copy schema from new index
	SetSchema ( pIndex->GetMatchSchema() );

	// copy tokenizer, dict etc settings from new index
	m_tSettings = pIndex->GetSettings();
	// NOTE(review): bigram word list is dropped here — presumably unsupported on the RT side; confirm
	m_tSettings.m_dBigramWords.Reset();
	m_pTokenizer = pIndex->GetTokenizer()->Clone ( SPH_CLONE_INDEX );
	m_pDict = pIndex->GetDictionary()->Clone ();
}
// ATTACH another RT index to this one: the source index's disk chunks are moved over
// one by one (its RAM segments are flushed to a disk chunk first).
// On any mid-move failure bFatal is set, since the source no longer has a consistent
// set of chunks and must be destroyed.
bool RtIndex_c::AttachRtIndex ( RtIndex_i * pSrcIndex, bool bTruncate, bool & bFatal, CSphString & sError )
{
	// from the next line we work in index simple scheduler. That made everything much simpler
	// (no need to care about locks and order of access to ram segments and disk chunks)
	ScopedScheduler_c tSerialFiber ( m_tWorkers.SerialChunkAccess() );
	TRACE_SCHED ( "rt", "AttachDiskIndex" );

	assert ( pSrcIndex );
	bFatal = false;
	if ( bTruncate && !Truncate ( sError, TRUNCATE ) )
		return false;

	// safeguards
	// we do not support some disk index features in RT just yet
	if ( !CanAttach ( pSrcIndex, sError ) )
		return false;

	// note: that is important. Active readers prohibited by topmost w-lock, but internal processes not!
	// fixme! Looks like a dupe, look at few lines above
	if ( bTruncate && !Truncate ( sError, TRUNCATE ) )
		return false;

	// attach to non-empty RT: first flush RAM segments to disk chunk, then apply upcoming index'es docs as k-list.
	if ( !m_tRtChunks.RamSegs()->IsEmpty() && !SaveDiskChunk ( true ) )
		return false;

	// flush the source's RAM segments too, so everything it has lives in disk chunks
	auto * pSrcRtIndex = static_cast<RtIndex_c *>( pSrcIndex );
	if ( !pSrcRtIndex->AttachSaveDiskChunk() )
		return false;

	{
		// prevent optimize to start during the disk chunks stealing
		OptimizeGuard_c tSrcStopOptimize ( *pSrcRtIndex );
		OptimizeGuard_c tDstStopOptimize ( *this );

		// collect all disk chunks from the source RT index in the brand new structure
		auto tNewSet = RtWriter();
		tNewSet.InitDiskChunks ( RtWriter_c::copy );

		// need to reset m_bFinallyUnlink flag for the disk chunks moved here after all finishes well
		int iUnlinkIndex = tNewSet.m_pNewDiskChunks->GetLength();

		// steal the source's disk chunks one at a time until it runs out
		for ( ;; )
		{
			ConstDiskChunkRefPtr_t tChunk = pSrcRtIndex->PopDiskChunk();
			if ( !tChunk )
				break;

			tChunk->m_bFinallyUnlink = true; // destroy the disk chunks on failure
			if ( !AttachDiskChunkMove ( static_cast<CSphIndex *>( *tChunk ), bFatal, sError ) )
			{
				bFatal = true; // need to destroy source index in case of failure as it does not have right amount of disk chunks anymore
				return false;
			}

			// update disk chunk list
			tNewSet.m_pNewDiskChunks->Add ( tChunk );
		}

		// clean up all destroy flag for all moved disk chunks after loop finished well
		for ( int i=iUnlinkIndex; i<tNewSet.m_pNewDiskChunks->GetLength(); i++ )
			tNewSet.m_pNewDiskChunks->At ( i )->m_bFinallyUnlink = false;
	}

	// adopt the source's schema/tokenizer/dict settings
	AttachSetSettings ( pSrcIndex );
	PostSetup();

	// FIXME? what about copying m_TID etc?

	// resave header file
	SaveMeta();

	// FIXME? do something about binlog too?
	// Binlog::NotifyIndexFlush ( GetName(), m_iTID, false );

	// all done, reset cache
	QcacheDeleteIndex ( GetIndexId() );
	QcacheDeleteIndex ( pSrcIndex->GetIndexId() );
	return true;
}
// Flush current RAM segments into a disk chunk (used before donating chunks to another
// index via ATTACH). Returns true if there was nothing to flush or the flush succeeded.
bool RtIndex_c::AttachSaveDiskChunk()
{
	if ( m_tRtChunks.RamSegs()->IsEmpty() )
		return true;

	// SaveDiskChunk must run in the serial fiber
	ScopedScheduler_c tSerialFiber ( m_tWorkers.SerialChunkAccess() );
	return SaveDiskChunk ( true );
}
// Detach one disk chunk from this index and hand ownership to the caller (used by
// AttachRtIndex to steal chunks). Returns an empty ref when no chunks are left.
// NOTE(review): relies on RtWriter_c::PopDiskChunk() semantics (which chunk is taken and
// how the remaining set is rebuilt) — confirm against the chunk-writer implementation.
ConstDiskChunkRefPtr_t RtIndex_c::PopDiskChunk()
{
	// serialized with all other chunk-set modifications
	ScopedScheduler_c tSerialFiber ( m_tWorkers.SerialChunkAccess() );
	auto tNewSet = RtWriter();
	tNewSet.InitDiskChunks ( RtWriter_c::empty );
	return tNewSet.PopDiskChunk();
}
//////////////////////////////////////////////////////////////////////////
// TRUNCATE
//////////////////////////////////////////////////////////////////////////
// kill RAM chunk file
void RtIndex_c::UnlinkRAMChunk ( const char* szInfo )
{
CSphString sFile = GetFilename ( "ram" );
if ( ::unlink ( sFile.cstr() ) && errno != ENOENT && szInfo )
sphWarning ( "rt: %s failed to unlink %s: (errno=%d, error=%s)", szInfo, sFile.cstr(), errno, strerrorm ( errno ) );
}
// Drop every document: RAM segments, RAM chunk file and all disk chunks.
// eAction distinguishes plain TRUNCATE (table stays, binlog forced to save)
// from table drop (binlog told the table is gone). Always succeeds.
bool RtIndex_c::Truncate ( CSphString&, Truncate_e eAction )
{
	// TRUNCATE will drop everything; so all 'optimizing' should be discarded as useless
	OptimizeGuard_c tStopOptimize ( *this );

	// do truncate in serial fiber. As it is re-enterable, don't care if we already there.
	ScopedScheduler_c tSerialFiber ( m_tWorkers.SerialChunkAccess() );
	TRACE_SCHED ( "rt", "Truncate" );

	// update and save meta
	// indicate 0 disk chunks, we are about to kill them anyway
	// current TID will be saved, so replay will properly skip preceding txns
	m_tStats.Reset();
	SaveMeta ( m_iTID, { nullptr, 0 } );

	// allow binlog to unlink now-redundant data files
	Binlog::NotifyIndexFlush ( m_iTID, GetName(), Binlog::NoShutdown, eAction==TRUNCATE ? Binlog::ForceSave : Binlog::DropTable );

	// kill RAM chunk file
	UnlinkRAMChunk ( "truncate" );

	// kill all disk chunks files
	auto pChunks = m_tRtChunks.DiskChunks();
	pChunks->for_each ( [] ( ConstDiskChunkRefPtr_t& t ) { t->m_bFinallyUnlink = true; } );

	{
		// remove all chunks and segments: publishing two empty sets drops the old ones
		// (marked chunks unlink their files when the last reference goes away)
		auto tChangeset = RtWriter();
		tChangeset.InitRamSegs ( RtWriter_c::empty );
		tChangeset.InitDiskChunks ( RtWriter_c::empty );
	}

	// reset cache
	QcacheDeleteIndex ( GetIndexId() );
	return true;
}
// Install (or clear, with pAccum=nullptr) the kill hook on one disk chunk by its id.
void RtIndex_c::SetKillHookFor ( IndexSegment_c* pAccum, int iDiskChunkID ) const
{
	// that ensures no concurrency
	ScopedScheduler_c tSerialFiber ( m_tWorkers.SerialChunkAccess() );
	auto fnSetHook = [pAccum] ( const DiskChunk_c* p ) { p->Cidx().SetKillHook ( pAccum ); };
	ProcessDiskChunkByID ( iDiskChunkID, fnSetHook );
}
// Install (or clear, with pAccum=nullptr) the kill hook on a set of disk chunks by id.
void RtIndex_c::SetKillHookFor ( IndexSegment_c* pAccum, VecTraits_T<int> dDiskChunkIDs ) const
{
	// that ensures no concurrency
	ScopedScheduler_c tSerialFiber ( m_tWorkers.SerialChunkAccess() );
	auto fnSetHook = [pAccum] ( const DiskChunk_c* p ) { p->Cidx().SetKillHook ( pAccum ); };
	ProcessDiskChunkByID ( dDiskChunkIDs, fnSetHook );
}
// track internals of merge to catch critical points necessary for concurrent kill and update
// Merge-progress callback for RT chunk merges. Two jobs:
// 1) while a chunk's docs are being collected, route concurrent kills into m_tKilledWhileMerge
//    so they can be re-applied to the merge result (GetKilled());
// 2) while a chunk's attributes are being copied, hold its read-lock and periodically yield
//    it (PULSE) so pending attribute updates are not starved.
class RTMergeCb_c final: public MergeCb_c
{
	KillAccum_t m_tKilledWhileMerge;	// docids killed while their chunk was being merged
	RtIndex_c* m_pOwner;
	CSphVector<int> m_dTrackedChunks;	// chunk ids whose kill hooks we installed (cleared in dtor)
	const DiskChunk_c* m_pChunk = nullptr;	// chunk currently in the MERGEATTRS phase (rlocked)
	int64_t m_iLastPayload = -1;		// id of that chunk, for sanity checks only

public:
	NONCOPYMOVABLE ( RTMergeCb_c );
	RTMergeCb_c ( std::atomic<bool>* pStop, RtIndex_c* pOwner )
		: MergeCb_c ( pStop )
		, m_pOwner ( pOwner )
	{}

	void SetEvent ( Event_e eEvent, int64_t iPayload ) final NO_THREAD_SAFETY_ANALYSIS
	{
		assert ( m_pOwner );
		RTLOGV << "SetEvent (" << eEvent << ", " << iPayload << ")";
		switch ( eEvent )
		{
		case E_COLLECT_START:
			// start routing kills of this chunk's docs into our accumulator
			m_dTrackedChunks.Add ( (int)iPayload );
			m_pOwner->SetKillHookFor ( &m_tKilledWhileMerge, (int)iPayload );
			break;
		case E_MERGEATTRS_START: // enter serial state/rlock
			m_iLastPayload = iPayload;
			m_pOwner->ProcessDiskChunkByID ( (int)iPayload, [this] ( const DiskChunk_c* p ) { m_pChunk = p; } );
			m_pChunk->m_tLock.ReadLock ();
			// mark the chunk busy so updates applied now are also kept as postponed updates
			m_pChunk->CastIdx().m_bAttrsBusy.store ( true, std::memory_order_release );
			break;
		case E_MERGEATTRS_PULSE: // inside serial state/rlock
#ifndef NDEBUG
			assert ( m_iLastPayload==iPayload );
			m_pOwner->ProcessDiskChunkByID ( (int)iPayload, [this] ( const DiskChunk_c* p ) { assert ( m_pChunk == p); } );
#endif
			// a writer is queued behind our rlock: drop it, reschedule so the update runs, retake
			if ( m_pChunk && m_pChunk->m_iPendingUpdates.load ( std::memory_order_relaxed )>0 && m_pChunk->m_tLock.TestNextWlock() )
			{
				m_pChunk->m_tLock.Unlock (); // pulse lock that update can catch
				Threads::Coro::Reschedule();
				m_pChunk->m_tLock.ReadLock ();
			}
			break;
		case E_MERGEATTRS_FINISHED: // leave serial state/rlock
#ifndef NDEBUG
			assert ( m_iLastPayload==iPayload );
			m_pOwner->ProcessDiskChunkByID ( (int)iPayload, [this] ( const DiskChunk_c* p ) { assert ( m_pChunk == p); } );
#endif
			m_pChunk->m_tLock.Unlock ();
			m_iLastPayload = -1;
			m_pChunk = nullptr;
			break;
		default:
			break;
		}
	}

	~RTMergeCb_c() final
	{
		// uninstall all kill hooks we set up during collection
		assert ( m_pOwner );
		m_pOwner->SetKillHookFor ( nullptr, m_dTrackedChunks );
	}

	// take the accumulated kills (deduplicated); leaves the accumulator empty
	CSphVector<DocID_t> GetKilled()
	{
		CSphVector<DocID_t> dDocs;
		m_tKilledWhileMerge.m_dDocids.SwapData ( dDocs );
		dDocs.Uniq();
		return dDocs;
	}

	bool HasKilled() const
	{
		return !m_tKilledWhileMerge.m_dDocids.IsEmpty();
	}
};
// Symbolic names for merge events, used by verbose logging (RTLOGV in RTMergeCb_c::SetEvent).
static StringBuilder_c & operator<< ( StringBuilder_c & dOut, MergeCb_c::Event_e eVal )
{
	switch ( eVal )
	{
	case MergeCb_c::E_IDLE: return dOut << "E_IDLE";
	case MergeCb_c::E_COLLECT_START: return dOut << "E_COLLECT_START";
	case MergeCb_c::E_COLLECT_FINISHED: return dOut << "E_COLLECT_FINISHED";
	case MergeCb_c::E_MERGEATTRS_START: return dOut << "E_MERGEATTRS_START";
	case MergeCb_c::E_MERGEATTRS_PULSE: return dOut << "E_MERGEATTRS_PULSE"; // fixed: was missing, logged as unknown
	case MergeCb_c::E_MERGEATTRS_FINISHED: return dOut << "E_MERGEATTRS_FINISHED";
	case MergeCb_c::E_KEYWORDS: return dOut << "E_KEYWORDS";
	case MergeCb_c::E_FINISHED: return dOut << "E_FINISHED";
	default: dOut.Sprintf ( "UNKNWN_%d", (int)eVal ); // fixed: format lacked a specifier, the value was dropped
	}
	return dOut;
}
// Debug-print helper: symbolic name of an optimize-task verb.
static StringBuilder_c& operator<< ( StringBuilder_c& dOut, OptimizeTask_t::OptimizeVerb_e eVerb )
{
	switch ( eVerb )
	{
	case OptimizeTask_t::eManualOptimize: return dOut << "eManualOptimize";
	case OptimizeTask_t::eDrop: return dOut << "eDrop";
	case OptimizeTask_t::eCompress: return dOut << "eCompress";
	case OptimizeTask_t::eDedup: return dOut << "eDedup"; // was missing; fell into default
	case OptimizeTask_t::eSplit: return dOut << "eSplit";
	case OptimizeTask_t::eMerge: return dOut << "eMerge";
	case OptimizeTask_t::eAutoOptimize: return dOut << "eAutoOptimize";
	// fixed: format string lacked '%d', so the numeric verb value was never printed
	default: dOut.Sprintf ( "eUnknown_%d", (int)eVerb );
	}
	return dOut;
}
// Debug-print helper: an optimize task as verb plus the parameters relevant to it.
static StringBuilder_c& operator<< ( StringBuilder_c& dOut, const OptimizeTask_t& tTask )
{
	switch ( tTask.m_eVerb )
	{
	case OptimizeTask_t::eManualOptimize: return dOut << "eManualOptimize, cutoff = " << tTask.m_iCutoff;
	case OptimizeTask_t::eDrop: return dOut << "eDrop " << tTask.m_iFrom;
	case OptimizeTask_t::eCompress: return dOut << "eCompress " << tTask.m_iFrom;
	case OptimizeTask_t::eDedup: return dOut << "eDedup " << tTask.m_iFrom; // was missing; fell into generic default
	case OptimizeTask_t::eSplit: return dOut << "eSplit " << tTask.m_iFrom << " filtering with '" << tTask.m_sUvarFilter << "'";
	case OptimizeTask_t::eMerge: return dOut << "eMerge from " << tTask.m_iFrom << " to " << tTask.m_iTo;
	case OptimizeTask_t::eAutoOptimize: return dOut << "eAutoOptimize, cutoff = " << tTask.m_iCutoff;
	// unknown verb: dump every field, since we can't tell which ones matter
	default: return dOut << tTask.m_eVerb << " cutoff=" << tTask.m_iCutoff << " From=" << tTask.m_iFrom << " To=" << tTask.m_iTo << " VarFilter= '" << tTask.m_sUvarFilter << "' bByOrder=" << tTask.m_bByOrder;
	}
}
//////////////////////////////////////////////////////////////////////////
// OPTIMIZE
//////////////////////////////////////////////////////////////////////////
// Estimate the disk footprint a chunk would have after its killed rows are purged:
// current disk usage scaled by the fraction of documents still alive.
static int64_t GetEffectiveSize ( const CSphIndexStatus& tStatus, int64_t iTotalDocs )
{
	// nothing killed - the whole file counts
	if ( !tStatus.m_iDead )
		return tStatus.m_iDiskUse;
	// everything killed - chunk is effectively empty
	if ( tStatus.m_iDead==iTotalDocs )
		return 0;
	// naive proportional model: disk usage attributed to the alive share of docs
	auto fDeadShare = double ( tStatus.m_iDead ) / double ( iTotalDocs );
	return int64_t ( tStatus.m_iDiskUse * ( 1.0 - fDeadShare ) );
}
// Effective (alive-docs-only) disk size of a single disk chunk.
static int64_t GetChunkSize ( const CSphIndex& tIndex )
{
	CSphIndexStatus tDisk;
	tIndex.GetStatus ( &tDisk );
	return GetEffectiveSize ( tDisk, tIndex.GetStats().m_iTotalDocuments );
}
// Disk chunk ID paired with its effective size.
// { -1, INT64_MAX } is used by GetNextSmallestChunkByID() as the 'not found' value.
struct ChunkAndSize_t
{
	int m_iId;
	int64_t m_iSize;
};
// Pick the disk chunk with the smallest effective size, skipping chunks that are
// already busy optimizing and the chunk with ID iChunkID (pass -1 to exclude none).
// Returns { -1, INT64_MAX } when no candidate exists.
static ChunkAndSize_t GetNextSmallestChunkByID ( const DiskChunkVec_c& dDiskChunks, int iChunkID )
{
	ChunkAndSize_t tBest { -1, INT64_MAX };
	for ( const auto& pChunk : dDiskChunks )
	{
		// chunk occupied by another optimize task - not a candidate
		if ( pChunk->m_bOptimizing.load ( std::memory_order_relaxed ) )
			continue;
		const CSphIndex& tIdx = pChunk->Cidx();
		int64_t iSize = GetChunkSize ( tIdx );
		if ( tIdx.m_iChunk!=iChunkID && iSize<tBest.m_iSize )
		{
			tBest.m_iSize = iSize;
			tBest.m_iId = tIdx.m_iChunk;
		}
	}
	return tBest;
}
// Count disk chunks currently flagged as participating in an optimize operation.
static int GetNumOfOptimizingNow ( const DiskChunkVec_c& dDiskChunks )
{
	return (int)dDiskChunks.count_of ( [] ( auto& i ) { return i->m_bOptimizing.load ( std::memory_order_relaxed ); } );
}
// Map an ordinal position in the disk chunk vector to the persistent chunk ID.
// Negative input is passed through unchanged (callers use it as a 'no chunk' marker).
int RtIndex_c::ChunkIDByChunkIdx ( int iChunkIdx ) const
{
	if ( iChunkIdx < 0 )
		return iChunkIdx;
	return m_tRtChunks.DiskChunks()->operator[] ( iChunkIdx )->Cidx().m_iChunk;
}
// COUNT(DISTINCT attr) fast path. Only answered when the whole table is exactly one
// disk chunk with no RAM segments; -1 means 'can't estimate here, do it the slow way'.
int64_t RtIndex_c::GetCountDistinct ( const CSphString & sAttr, CSphString & sModifiedAttr ) const
{
	// fixme! add code to calculate distinct values in RAM segments
	if ( m_tRtChunks.GetRamSegmentsCount() )
		return -1;
	auto pDiskChunks = m_tRtChunks.DiskChunks();
	if ( !pDiskChunks || pDiskChunks->GetLength()!=1 )
		return -1;
	return (*pDiskChunks)[0]->Cidx().GetCountDistinct ( sAttr, sModifiedAttr );
}
// COUNT(*) with a single filter, summed over all disk chunks.
// Returns -1 if RAM segments exist or any chunk can't answer; 0 when there are no chunks.
int64_t RtIndex_c::GetCountFilter ( const CSphFilterSettings & tFilter, CSphString & sModifiedAttr ) const
{
	// fixme! add code to calculate count(*) in RAM segments
	if ( m_tRtChunks.GetRamSegmentsCount() )
		return -1;
	auto pDiskChunks = m_tRtChunks.DiskChunks();
	if ( !pDiskChunks || !pDiskChunks->GetLength() )
		return 0;
	int64_t iSumCount = 0;
	for ( const auto & i : *pDiskChunks )
	{
		int64_t iCount = i->Cidx().GetCountFilter ( tFilter, sModifiedAttr );
		// a single non-answering chunk invalidates the whole fast path
		if ( iCount==-1 )
			return -1;
		iSumCount += iCount;
	}
	return iSumCount;
}
// Total alive document count: alive rows of all RAM segments plus per-disk-chunk counts.
// Returns -1 if any disk chunk can't report its own count.
int64_t RtIndex_c::GetCount() const
{
	int64_t iCount = 0;
	auto pSegs = m_tRtChunks.RamSegs();
	if ( pSegs )
		for ( auto & pSeg : *pSegs )
			iCount += const_cast<RtSegment_t*> ( pSeg.Ptr() )->m_tAliveRows; // cast needed to read the counter; no mutation intended
	auto pDiskChunks = m_tRtChunks.DiskChunks();
	if ( pDiskChunks )
		for ( const auto & i : *pDiskChunks )
		{
			int64_t iChunkCount = i->Cidx().GetCount();
			if ( iChunkCount==-1 )
				return -1;
			iCount += iChunkCount;
		}
	return iCount;
}
// Returns { metric, thread cap } used by the pseudo-sharding dispatcher.
// { 0, 1 } forces single-threaded execution; a cap of 0 means 'no cap'.
// When pseudo-sharding is off, the cap is the number of disk chunks (one thread per chunk).
std::pair<int64_t,int> RtIndex_c::GetPseudoShardingMetric ( const VecTraits_T<const CSphQuery> & dQueries, const VecTraits_T<int64_t> & dMaxCountDistinct, int iThreads, bool & bForceSingleThread ) const
{
	if ( MustRunInSingleThread ( dQueries, false, dMaxCountDistinct, bForceSingleThread ) )
		return { 0, 1 };
	auto tGuard = RtGuard();
	int iThreadCap = GetPseudoSharding() ? 0 : tGuard.m_dDiskChunks.GetLength();
	return { GetStats().m_iTotalDocuments, iThreadCap };
}
// Remove the disk chunk with the given ID from the published chunk set and mark its
// files for deletion. Runs in the serial chunk-access fiber so the chunk list can't
// change underneath. Bumps *pAffected (if given) unconditionally.
void RtIndex_c::DropDiskChunk ( int iChunkID, int* pAffected )
{
	TRACE_SCHED ( "rt", "RtIndex_c::DropDiskChunk" );
	sphLogDebug( "rt optimize: table %s: drop disk chunk %d", GetName(), iChunkID );
	// work in serial fiber
	ScopedScheduler_c tSerialFiber ( m_tWorkers.SerialChunkAccess() );
	TRACE_SCHED ( "rt", "DropDiskChunk" );
	{
		// rebuild the chunk vector without the victim; scope ends before SaveMeta()
		// so the changeset is committed first
		auto tChangeset = RtWriter();
		tChangeset.InitDiskChunks ( RtWriter_c::empty );
		auto pChunks = m_tRtChunks.RtData().m_pChunks;
		for ( auto& pChunk : *pChunks )
			if ( iChunkID == pChunk->Cidx().m_iChunk )
				pChunk->m_bFinallyUnlink = true; // files deleted when last ref goes away
			else
				tChangeset.m_pNewDiskChunks->Add ( pChunk );
	}
	SaveMeta();
	if ( pAffected )
		++*pAffected;
}
// perform merge, preload result, rename to final chunk and return preallocated result scheduled to dispose
// pChunkA==pChunkB means 'compress in place'; non-empty dFilters turns the merge into a split pass.
// Returns null ptr on failure; on success the result has m_bFinallyUnlink=true, so the
// caller must reset that flag once the chunk is published.
ConstDiskChunkRefPtr_t RtIndex_c::MergeDiskChunks ( const char* szParentAction, const ConstDiskChunkRefPtr_t& pChunkA, const ConstDiskChunkRefPtr_t& pChunkB, CSphIndexProgress& tProgress, VecTraits_T<CSphFilterSettings> dFilters )
{
	TRACE_CORO ( "rt", "RtIndex_c::MergeDiskChunks" );
	CSphString sError;
	const CSphIndex& tChunkA = pChunkA->Cidx();
	const CSphIndex& tChunkB = pChunkB->Cidx();
	ConstDiskChunkRefPtr_t pChunk;
	// note: klist for merged chunk will be attached during merge at the moment of copying alive rows.
	if ( !sphMerge ( &tChunkA, &tChunkB, dFilters, tProgress, sError ) )
	{
		if ( sError.IsEmpty() && tProgress.GetMergeCb().NeedStop() )
			sError = "interrupted because of shutdown";
		sphWarning ( "rt %s: table %s: failed to %s %s (%s)", szParentAction, GetName(), dFilters.IsEmpty() ? "merge" : "split", tChunkA.GetFilebase(), sError.cstr() );
		return pChunk;
	}
	// PauseCheck ( "postmerge" )
	auto fnFnameBuilder = GetIndexFilenameBuilder();
	std::unique_ptr<FilenameBuilder_i> pFilenameBuilder;
	if ( fnFnameBuilder )
		pFilenameBuilder = fnFnameBuilder ( GetName() );
	// prealloc new (optimized) chunk
	CSphString sChunk = tChunkA.GetFilename ( "tmp" );
	StrVec_t dWarnings;
	pChunk = DiskChunk_c::make ( PreallocDiskChunk ( sChunk, tChunkA.m_iChunk, pFilenameBuilder.get(), dWarnings, sError, tChunkA.GetName() ) );
	dWarnings.for_each ( [] ( const auto& sWarning ) { sphWarning ( "PreallocDiskChunk warning: %s", sWarning.cstr() ); } );
	if ( pChunk )
		pChunk->m_bFinallyUnlink = true; // on destroy files will be deleted. Caller must explicitly reset this flag if chunk is usable
	else
		sphWarning ( "rt %s: table %s: failed to prealloc", szParentAction, GetName() );
	return pChunk;
}
// Assign a fresh chunk ID to a just-merged (not yet published) chunk and rename its
// files from the temporary name to the final one. Returns false on rename failure.
bool RtIndex_c::RenameOptimizedChunk ( const ConstDiskChunkRefPtr_t& pChunk, const char* szParentAction )
{
	if ( !pChunk )
		return false;
	CSphIndex& tChunk = pChunk->CastIdx(); // const breakage is ok since we don't yet published the index
	// prepare new chunk to publish
	int iResID = m_tChunkID.MakeChunkId ( m_tRtChunks );
	auto sNewchunk = GetFilename ( iResID );
	tChunk.m_iChunk = iResID;
	// rename merged disk chunk to valid chunk name
	if ( tChunk.Rename ( sNewchunk ) )
		return true;
	sphWarning ( "rt %s: table %s: processed to cur rename failed (%s)", szParentAction, GetName(), tChunk.GetLastError().cstr() );
	return false;
}
// Rebuild the published disk chunk list, letting fnPusher decide per chunk ID whether
// to replace/drop it (returns true) or keep it (returns false, chunk is carried over).
// If fnPusher never returned true the changeset is discarded and false is returned.
// Must run in the serial chunk-access fiber (enforced by REQUIRES).
bool RtIndex_c::PublishMergedChunks ( const char* szParentAction, std::function<bool ( int, DiskChunkVec_c& )>&& fnPusher ) REQUIRES ( m_tWorkers.SerialChunkAccess() )
{
	TRACE_CORO ( "rt", "PublishMergedChunks" );
	bool bReplaced = false;
	auto tChangeset = RtWriter();
	tChangeset.InitDiskChunks ( RtWriter_c::empty );
	auto pChunks = m_tRtChunks.DiskChunks();
	for ( auto& pChunk : *pChunks )
	{
		if ( fnPusher ( pChunk->Cidx().m_iChunk, *tChangeset.m_pNewDiskChunks ) )
			bReplaced = true;
		else
			tChangeset.m_pNewDiskChunks->Add ( pChunk );
	}
	if ( !bReplaced )
	{
		sphWarning ( "rt %s: table %s: unable to locate victim chunk after merge, leave everything unchanged", szParentAction, GetName() );
		tChangeset.m_pNewDiskChunks = nullptr; // discard changes, i.e. disk chunk set will NOT be modified
		return false;
	}
	return true;
}
// Number of documents in a chunk that are still alive (total minus killed).
static int64_t NumAliveDocs ( const CSphIndex& tChunk )
{
	CSphIndexStatus tStatus;
	tChunk.GetStatus ( &tStatus );
	return tChunk.GetStats().m_iTotalDocuments - tStatus.m_iDead;
}
// Write one COMMIT transaction into the binlog: the accepted RAM segment (if any)
// followed by the kill list. The serialization order here must exactly match the
// deserialization order in ReplayCommit().
bool RtIndex_c::BinlogCommit ( RtSegment_t * pSeg, const VecTraits_T<DocID_t> & dKlist, int64_t iAddTotalBytes, CSphString & sError ) REQUIRES ( pSeg->m_tLock )
{
	// Tracer::AsyncOp tTracer ( "rt", "RtIndex_c::BinlogCommit" );
	return Binlog::Commit ( &m_iTID, GetName(), sError, [pSeg,&dKlist,iAddTotalBytes,bKeywordDict=m_bKeywordDict] (Writer_i & tWriter) REQUIRES ( pSeg->m_tLock )
	{
		tWriter.PutByte ( Binlog::COMMIT );
		// kill-only transaction: zero row count, then just the klist
		if ( !pSeg || !pSeg->m_uRows )
		{
			tWriter.ZipOffset ( 0 );
			Binlog::SaveVector ( tWriter, dKlist );
			return;
		}
		tWriter.ZipOffset ( pSeg->m_uRows );
		tWriter.ZipOffset ( iAddTotalBytes );
		Binlog::SaveVector ( tWriter, pSeg->m_dWords );
		tWriter.ZipOffset ( pSeg->m_dWordCheckpoints.GetLength() );
		if ( !bKeywordDict )
		{
			// crc dict: checkpoints carry numeric word IDs
			for ( const auto& dWordCheckpoint : pSeg->m_dWordCheckpoints )
			{
				tWriter.ZipOffset ( dWordCheckpoint.m_iOffset );
				tWriter.ZipOffset ( dWordCheckpoint.m_uWordID );
			}
		} else
		{
			// keyword dict: checkpoints carry offsets into the keyword blob instead of pointers
			const auto * pBase = (const char *)pSeg->m_dKeywordCheckpoints.Begin();
			for ( const auto & dWordCheckpoint : pSeg->m_dWordCheckpoints )
			{
				tWriter.ZipOffset ( dWordCheckpoint.m_iOffset );
				tWriter.ZipOffset ( dWordCheckpoint.m_szWord - pBase );
			}
		}
		Binlog::SaveVector ( tWriter, pSeg->m_dDocs );
		Binlog::SaveVector ( tWriter, pSeg->m_dHits );
		Binlog::SaveVector ( tWriter, pSeg->m_dRows );
		Binlog::SaveVector ( tWriter, pSeg->m_dBlobs );
		Binlog::SaveVector ( tWriter, pSeg->m_dKeywordCheckpoints );
		// optional docstore and columnar payloads, each prefixed by a presence byte
		tWriter.PutByte ( pSeg->m_pDocstore ? 1 : 0 );
		if ( pSeg->m_pDocstore )
			pSeg->m_pDocstore->Save ( tWriter );
		tWriter.PutByte ( pSeg->m_pColumnar ? 1 : 0 );
		if ( pSeg->m_pColumnar )
			pSeg->m_pColumnar->Save ( tWriter );
		Binlog::SaveVector ( tWriter, dKlist );
	});
}
// Copy the reader's error message into sError and return an empty (invalid) result.
static Binlog::CheckTnxResult_t Warn ( CSphString & sError, const CSphReader & tReader )
{
	sError = tReader.GetErrorMessage();
	return {};
}
// Replay one binlogged COMMIT transaction: deserialize the RAM segment (if present)
// and the kill list in the exact order BinlogCommit() wrote them, then - if
// fnCanContinue approves - rebuild derived segment data and apply the commit.
// On reader failure sError gets the reader's message and an empty result is returned.
Binlog::CheckTnxResult_t RtIndex_c::ReplayCommit ( CSphReader & tReader, CSphString & sError, Binlog::CheckTxn_fn && fnCanContinue )
{
	CSphRefcountedPtr<RtSegment_t> pSeg;
	CSphVector<DocID_t> dKlist;
	int64_t iAddTotalBytes = 0;
	DWORD uRows = tReader.UnzipOffset();
	if ( uRows )
	{
		iAddTotalBytes = (int64_t)tReader.UnzipOffset();
		pSeg = new RtSegment_t(uRows, m_tSchema);
		FakeWL_t _ ( pSeg->m_tLock ); // segment is private here, so a fake lock satisfies the annotations
		if ( !Binlog::LoadVector ( tReader, pSeg->m_dWords ) ) return Warn ( sError, tReader );
		pSeg->m_dWordCheckpoints.Resize ( (int) tReader.UnzipOffset() ); // FIXME! sanity check
		ARRAY_FOREACH ( i, pSeg->m_dWordCheckpoints )
		{
			pSeg->m_dWordCheckpoints[i].m_iOffset = (int) tReader.UnzipOffset();
			// for keyword dicts this is actually an offset into the keyword blob; fixed up later
			pSeg->m_dWordCheckpoints[i].m_uWordID = (SphWordID_t )tReader.UnzipOffset();
		}
		if ( tReader.GetErrorFlag() ) return Warn ( sError, tReader );
		if ( !Binlog::LoadVector ( tReader, pSeg->m_dDocs ) ) return Warn ( sError, tReader );
		if ( !Binlog::LoadVector ( tReader, pSeg->m_dHits ) ) return Warn ( sError, tReader );
		if ( !Binlog::LoadVector ( tReader, pSeg->m_dRows ) ) return Warn ( sError, tReader );
		if ( !Binlog::LoadVector ( tReader, pSeg->m_dBlobs ) ) return Warn ( sError, tReader );
		if ( !Binlog::LoadVector ( tReader, pSeg->m_dKeywordCheckpoints ) ) return Warn ( sError, tReader );
		bool bHaveDocstore = !!tReader.GetByte();
		if ( tReader.GetErrorFlag() ) return Warn ( sError, tReader );
		if ( bHaveDocstore )
		{
			pSeg->SetupDocstore ( &(GetInternalSchema()) );
			assert ( pSeg->m_pDocstore );
			Verify ( pSeg->m_pDocstore->Load(tReader) );
		}
		bool bHaveColumnar = !!tReader.GetByte();
		if ( tReader.GetErrorFlag() ) return Warn ( sError, tReader );
		if ( bHaveColumnar )
		{
			// fixed: a local CSphString used to shadow the sError out-param here,
			// silently discarding the columnar loading error; now it propagates to the caller
			pSeg->m_pColumnar = CreateColumnarRT ( GetInternalSchema(), tReader, sError );
			if ( !pSeg->m_pColumnar )
				return {};
		}
		pSeg->BuildDocID2RowIDMap ( GetInternalSchema() );
	}
	if ( !Binlog::LoadVector ( tReader, dKlist ) ) return Warn ( sError, tReader );
	Binlog::CheckTnxResult_t tRes = fnCanContinue ();
	if ( tRes.m_bValid && tRes.m_bApply )
	{
		// in case dict=keywords
		// + cook checkpoint
		// + build infixes
		if ( IsWordDict() && pSeg )
		{
			FixupSegmentCheckpoints ( pSeg );
			BuildSegmentInfixes ( pSeg, GetDictionary()->HasMorphology(), IsWordDict(), GetSettings().m_iMinInfixLen,
				GetWordCheckoint(), ( GetMaxCodepointLength()>1 ), GetSettings().m_eHitless );
		}
		// actually replay
		FakeRL_t _ ( pSeg.operator RtSegment_t*()->m_tLock);
		int iKilled = 0;
		CommitReplayable ( pSeg, dKlist, iAddTotalBytes, iKilled, sError );
		tRes.m_bApply = true;
	}
	return tRes;
}
// Dispatch a binlog transaction to the proper replay handler by opcode.
// Unknown opcodes assert in debug builds and return an empty (invalid) result.
Binlog::CheckTnxResult_t RtIndex_c::ReplayTxn ( CSphReader & tReader, CSphString & sError, BYTE uOp, Binlog::CheckTxn_fn && fnCanContinue )
{
	switch ( uOp )
	{
	case Binlog::UPDATE_ATTRS: return ReplayUpdate ( tReader, sError, std::move ( fnCanContinue ) );
	case Binlog::COMMIT: return ReplayCommit ( tReader, sError, std::move ( fnCanContinue ) );
	default: assert (false && "unknown op provided to replay");
	}
	return {};
}
// Fast-path check before compressing/splitting a chunk.
// Returns true when no further work is needed: either the chunk is fully killed
// (it gets dropped here) or, with bCheckAlive, it has no kills at all (skip).
// When real work remains, returns false and bumps *pAffected.
bool RtIndex_c::SkipOrDrop ( int iChunkID, const CSphIndex& dChunk, bool bCheckAlive, int * pAffected )
{
	auto iTotalDocs = dChunk.GetStats().m_iTotalDocuments;
	auto iAliveDocs = NumAliveDocs ( dChunk );
	// all docs killed
	if ( !iAliveDocs )
	{
		sphLogDebug ( "common merge - drop %d, all (" INT64_FMT ") killed", iChunkID, iTotalDocs );
		DropDiskChunk ( iChunkID, pAffected );
		return true;
	}
	// no docs killed
	if ( bCheckAlive && iTotalDocs == iAliveDocs )
	{
		sphLogDebug ( "common merge - skip compressing %d, no killed", iChunkID );
		return true;
	}
	if ( pAffected )
		++*pAffected;
	return false;
}
// Rewrite one disk chunk onto itself, physically dropping killed rows.
// The heavy merge runs outside the serial fiber; publishing the result, applying
// kills/updates collected meanwhile, and saving meta happen inside it.
bool RtIndex_c::CompressOneChunk ( int iChunkID, int* pAffected )
{
	TRACE_CORO ( "rt", "RtIndex_c::CompressOneChunk" );
	auto pVictim = m_tRtChunks.DiskChunkByID ( iChunkID );
	if ( !pVictim )
	{
		sphWarning ( "rt optimize: table %s: compress of chunk %d failed, no chunk with such ID!", GetName(), iChunkID );
		return false;
	}
	const CSphIndex& tVictim = pVictim->Cidx();
	if ( SkipOrDrop ( iChunkID, tVictim, true, pAffected ) )
		return true;
	sphLogDebug ( "compress %d (%d kb)", iChunkID, (int)( GetChunkSize ( tVictim ) / 1024 ) );
	pVictim->m_bOptimizing.store ( true, std::memory_order_relaxed );
	auto tResetOptimizing = AtScopeExit ( [pVictim] { pVictim->m_bOptimizing.store ( false, std::memory_order_relaxed ); } );
	// merge data to disk ( data is constant during that phase )
	RTMergeCb_c tMonitor ( &m_bOptimizeStop, this );
	CSphIndexProgress tProgress ( &tMonitor );
	// self-merge (A==B) with no filters acts as a compress
	auto pCompressed = MergeDiskChunks ( "compress", pVictim, pVictim, tProgress, { nullptr, 0 } );
	auto tFinallyStopCollectingUpdates = AtScopeExit ( [pVictim] { pVictim->CastIdx().ResetPostponedUpdates(); } );
	if ( !pCompressed )
		return false;
	if ( !RenameOptimizedChunk ( pCompressed, "compress" ) )
		return false;
	if ( tMonitor.HasKilled() && tMonitor.NeedStop() ) // if we should interrupt now, but if no kills, can continue.
		return false;
	CSphIndex& tCompressed = pCompressed->CastIdx(); // const breakage is ok since we don't yet published the index
	// going to modify list of chunks; so fall into serial fiber
	ScopedScheduler_c tSerialFiber ( m_tWorkers.SerialChunkAccess() );
	TRACE_CORO ( "rt", "CompressOneChunk.serial" );
	// reset kill hook explicitly to override default order of destruction
	SetKillHookFor ( nullptr, iChunkID );
	// and also apply already collected kills
	int iKilled = tCompressed.KillMulti ( tMonitor.GetKilled() );
	// and also apply collected updates
	auto& dUpdates = pVictim->CastIdx().m_dPostponedUpdates;
	if ( !dUpdates.IsEmpty() )
	{
		tCompressed.UpdateAttributesOffline ( dUpdates );
		dUpdates.Reset();
	}
	if ( !PublishMergedChunks ( "compress", [iChunkID, pCompressed] ( int iChunk, DiskChunkVec_c& tRes ) {
		if ( iChunk==iChunkID )
			tRes.Add ( pCompressed );
		return iChunk==iChunkID; } ) )
		return false;
	sphLogDebug ( "compressed a=%s, new=%s, killed=%d", tVictim.GetFilebase(), tCompressed.GetFilebase(), iKilled );
	pVictim->m_bFinallyUnlink = true; // old files go away with the last ref
	pCompressed->m_bFinallyUnlink = false; // new chunk is published, keep its files
	SaveMeta();
	Preread();
	if ( pAffected )
		++*pAffected;
	return true;
}
// Kill duplicate documents inside one disk chunk (in place, no merge/rewrite).
// Runs in the serial fiber; *pAffected is bumped only if something was actually killed.
bool RtIndex_c::DedupOneChunk ( int iChunkID, int* pAffected )
{
	TRACE_CORO ( "rt", "RtIndex_c::DedupOneChunk" );
	auto pVictim = m_tRtChunks.DiskChunkByID ( iChunkID );
	if ( !pVictim )
	{
		sphWarning ( "rt optimize: table %s: dedup of chunk %d failed, no chunk with such ID!", GetName(), iChunkID );
		return false;
	}
	const CSphIndex& tVictim = pVictim->Cidx();
	if ( SkipOrDrop ( iChunkID, tVictim, false, pAffected ) )
		return true;
	sphLogDebug ( "dedup %d (%d docs)", iChunkID, (int)tVictim.GetCount() );
	ScopedScheduler_c tSerialFiber ( m_tWorkers.SerialChunkAccess() );
	TRACE_CORO ( "rt", "DedupOneChunk.serial" );
	// and also apply already collected kills
	int iKilled = pVictim->CastIdx().KillDupes();
	m_tStats.m_iTotalDocuments -= iKilled; // dupes are gone for good, adjust the global counter
	if ( pAffected && iKilled>0 )
		*pAffected += 1;
	return true;
}
// catch common cases where we can't work at all (erroneous settings, etc.), or can redirect to another action
// Returns true when the request was fully handled here (bResult carries the outcome);
// false means 'proceed with the real split' in SplitOneChunk().
bool RtIndex_c::SplitOneChunkFast ( int iChunkID, const char* szUvarFilter, bool& bResult, int* pAffected )
{
	// no filter means there is nothing to split by - degrade to a plain compress
	if ( !szUvarFilter )
	{
		sphLogDebug ( "zero filter provided to split chunk %d, perform simple compress", iChunkID );
		bResult = CompressOneChunk ( iChunkID, pAffected );
		return true;
	}
	auto pVictim = m_tRtChunks.DiskChunkByID ( iChunkID );
	if ( !pVictim )
	{
		sphWarning ( "rt optimize: table %s: split of chunk %d failed, no chunk with such ID!", GetName(), iChunkID );
		bResult = false;
		return true;
	}
	const CSphIndex& tVictim = pVictim->Cidx();
	if ( SkipOrDrop ( iChunkID, tVictim, false, pAffected ) )
	{
		bResult = true;
		return true;
	}
	if ( !NumAliveDocs ( tVictim ) )
	{
		sphLogDebug ( "chunk empty, nothing to split" );
		bResult = false;
		return true;
	}
	// create filter by @uservar - break if it is not available.
	if ( !UservarsAvailable() )
	{
		sphLogDebug ( "no global variables found" );
		bResult = false;
		return true;
	}
	const UservarIntSet_c pUservar = Uservars ( szUvarFilter );
	if ( !pUservar )
	{
		sphLogDebug ( "undefined global variable '%s'", szUvarFilter );
		bResult = false;
		return true;
	}
	return false;
}
// compress iChunk with filter id by uservar include and exclude,
// then replace original chunk with one of pieces, and insert second piece just after the first.
// Two passes over the victim: first with the filter negated (Excluded piece), then
// with it as-is (Included piece); both pieces replace the victim in the chunk list.
bool RtIndex_c::SplitOneChunk ( int iChunkID, const char* szUvarFilter, int* pAffected )
{
	TRACE_CORO ( "rt", "RtIndex_c::SplitOneChunk" );
	bool bResult;
	if ( SplitOneChunkFast ( iChunkID, szUvarFilter, bResult, pAffected ) )
		return bResult;
	auto pVictim = m_tRtChunks.DiskChunkByID ( iChunkID );
	assert ( pVictim && "non-existent chunks should be already rejected by SplitOneChunkFast" );
	CSphIndex& tVictim = pVictim->CastIdx(); // non-const need to invoke 'merge'
	sphLogDebug ( "split %d (%d kb) with %s", iChunkID, (int)( GetChunkSize ( tVictim ) / 1024 ), szUvarFilter );
	pVictim->m_bOptimizing.store ( true, std::memory_order_relaxed );
	auto tResetOptimizing = AtScopeExit ( [pVictim] { pVictim->m_bOptimizing.store ( false, std::memory_order_relaxed ); } );
	const UservarIntSet_c pUservar = Uservars ( szUvarFilter );
	assert ( pUservar ); // detailed check already performed in splitOneChunkFast
	// create negative (exclusion) filter
	CSphVector<CSphFilterSettings> dFilters;
	auto & dFilter = dFilters.Add ();
	dFilter.m_sAttrName = "id";
	dFilter.m_eType = SPH_FILTER_VALUES;
	dFilter.SetExternalValues ( *pUservar );
	dFilter.m_bExclude = true;
	// prepare for real split (merge)
	RTMergeCb_c tMonitor ( &m_bOptimizeStop, this );
	CSphIndexProgress tProgress ( &tMonitor );
	auto iOriginallyAlive = NumAliveDocs ( tVictim );
	// get 1-st chunk - one which doesn't match filter the filter
	auto pChunkE = MergeDiskChunks ( "1-st part of split", pVictim, pVictim, tProgress, dFilters );
	auto tFinallyStopCollectingUpdatesE = AtScopeExit ( [pVictim] { pVictim->CastIdx().ResetPostponedUpdates(); } );
	// check forced exit after long operation (that is - after merge)
	if ( !pChunkE || tMonitor.NeedStop() )
		return false;
	CSphIndex& tIndexE = pChunkE->CastIdx(); // const breakage is ok since we don't yet published the index
	// if nothing is alive after filter applied - fast break, nothing to do.
	auto iExcludedAlive = NumAliveDocs ( tIndexE );
	if ( !iExcludedAlive )
	{
		// fool protect - either nothing, either all is filtered. No need to continue.
		sphLogDebug ( "filter selected nothing, no point to split" );
		return false;
	} else if ( iExcludedAlive == iOriginallyAlive )
	{
		// fool protect - either nothing, either all is filtered. No need to continue.
		sphLogDebug ( "filter selected everything, no point to split" );
		return false;
	}
	if ( !RenameOptimizedChunk ( pChunkE, "1-st part of split" ) )
		return false;
	// prepare <I>ncluded chunk - one with included docs, it will be placed instead of original one
	dFilter.m_bExclude = false;
	auto pChunkI = MergeDiskChunks ( "2-nd part of split", pVictim, pVictim, tProgress, dFilters );
	// check forced exit after long operation (that is - after merge)
	if ( !pChunkI || tMonitor.NeedStop() )
		return false;
	CSphIndex& tIndexI = pChunkI->CastIdx(); // const breakage is ok since we don't yet published the index
	if ( tMonitor.HasKilled() && tMonitor.NeedStop() ) // if we should interrupt now, but if no kills, can continue.
		return false;
	if ( !RenameOptimizedChunk ( pChunkI, "2-nd part of split" ) )
		return false;
	// going to modify list of chunks; so fall into serial fiber
	ScopedScheduler_c tSerialFiber ( m_tWorkers.SerialChunkAccess() );
	TRACE_CORO ( "rt", "SplitOneChunk" );
	// reset kill hook explicitly to override default order of destruction
	SetKillHookFor ( nullptr, iChunkID );
	// apply collected kill-list before including chunks to the set
	// as we are in serial worker, that is safe here; no new kills may arrive.
	int iKilled = 0;
	if ( tMonitor.HasKilled() )
	{
		auto dKilled = tMonitor.GetKilled();
		iKilled += tIndexE.KillMulti ( dKilled );
		iKilled += tIndexI.KillMulti ( dKilled );
	}
	// and also apply collected updates
	auto& dUpdates = pVictim->CastIdx().m_dPostponedUpdates;
	if ( !dUpdates.IsEmpty() )
	{
		tIndexI.UpdateAttributesOffline ( dUpdates );
		tIndexE.UpdateAttributesOffline ( dUpdates );
		dUpdates.Reset();
	}
	if ( !PublishMergedChunks ( "split",
		[iChunkID, pChunkI, pChunkE] ( int iChunk, DiskChunkVec_c& tRes ) {
			if ( iChunk==iChunkID )
			{
				tRes.Add ( pChunkI );
				tRes.Add ( pChunkE );
				return true;
			}
			return false; } ) )
		return false;
	sphLogDebug ( "split a=%s, b=%s, killed=%d", tIndexE.GetFilebase(), tIndexI.GetFilebase(), iKilled );
	pVictim->m_bFinallyUnlink = true;
	pChunkI->m_bFinallyUnlink = false;
	pChunkE->m_bFinallyUnlink = false;
	SaveMeta();
	Preread();
	if ( pAffected )
		++*pAffected;
	return true;
}
// Merge two disk chunks into one new chunk which takes B's position in the chunk list;
// both sources are then unlinked. Kills and updates collected during the merge are
// re-applied to the result before it is published. sLog (optional) receives a summary.
bool RtIndex_c::MergeTwoChunks ( int iAID, int iBID, int* pAffected, CSphString* sLog )
{
	TRACE_CORO ( "rt", "RtIndex_c::MergeTwoChunks" );
	auto pA = m_tRtChunks.DiskChunkByID ( iAID );
	if ( !pA )
	{
		sphWarning ( "rt optimize: table %s: merge chunks %d and %d failed, chunk ID %d is not valid!", GetName(), iAID, iBID, iAID );
		return false;
	}
	auto pB = m_tRtChunks.DiskChunkByID ( iBID );
	if ( !pB )
	{
		sphWarning ( "rt optimize: table %s: merge chunks %d and %d failed, chunk ID %d is not valid!", GetName(), iAID, iBID, iBID );
		return false;
	}
	// mark both chunks busy for the duration of the merge
	pA->m_bOptimizing.store ( true, std::memory_order_relaxed );
	auto tResetOptimizingA = AtScopeExit ( [pA] { pA->m_bOptimizing.store ( false, std::memory_order_relaxed ); } );
	pB->m_bOptimizing.store ( true, std::memory_order_relaxed );
	auto tResetOptimizingB = AtScopeExit ( [pB] { pB->m_bOptimizing.store ( false, std::memory_order_relaxed ); } );
	sphLogDebug ( "common merge - merging %d (%d kb) with %d (%d kb)",
			iAID,
			(int)( GetChunkSize ( pA->Cidx() ) / 1024 ),
			iBID,
			(int)( GetChunkSize ( pB->Cidx() ) / 1024 ) );
	// merge data to disk ( data is constant during that phase )
	RTMergeCb_c tMonitor ( &m_bOptimizeStop, this );
	CSphIndexProgress tProgress ( &tMonitor );
	// get 1-st chunk - one which doesn't match filter the filter
	auto pMerged = MergeDiskChunks ( "common merge", pA, pB, tProgress, { nullptr, 0 } );
	auto tFinallyStopCollectingUpdates = AtScopeExit ( [pA, pB] {
		pA->CastIdx().ResetPostponedUpdates();
		pB->CastIdx().ResetPostponedUpdates();
	} );
	// check forced exit after long operation (that is - after merge)
	if ( !pMerged || tMonitor.NeedStop() )
		return false;
	if ( !RenameOptimizedChunk ( pMerged, "common merge" ) )
		return false;
	CSphIndex& tMerged = pMerged->CastIdx(); // const breakage is ok since we don't yet published the index
	// going to modify list of chunks; so fall into serial fiber
	TRACE_CORO ( "rt", "RtIndex_c::MergeTwoChunks_workserial" );
	BEGIN_CORO ( "wait", "RtIndex_c::acquire serial fiber" );
	ScopedScheduler_c tSerialFiber ( m_tWorkers.SerialChunkAccess() );
	END_CORO ("wait" );
	// reset kill hook explicitly to override default order of destruction
	SetKillHookFor ( nullptr, iAID );
	SetKillHookFor ( nullptr, iBID );
	// apply collected kill-list before including chunks to the set
	// as we are in serial worker, that is safe here; no new kills may arrive.
	int iKilled = 0;
	if ( tMonitor.HasKilled() )
		iKilled = tMerged.KillMulti ( tMonitor.GetKilled() );
	// and also apply collected updates
	CSphVector<ConstDiskChunkRefPtr_t> tUpdated;
	tUpdated.Add ( pA );
	tUpdated.Add ( pB );
	auto dUpdates = GatherUpdates::FromChunksOrSegments ( tUpdated );
	if ( !dUpdates.IsEmpty() )
	{
		tMerged.UpdateAttributesOffline ( dUpdates );
		dUpdates.Reset();
	}
	// merged chunk replaces B; A is simply dropped from the list
	if ( !PublishMergedChunks ( "optimize", [iAID, iBID, pMerged] ( int iChunk, DiskChunkVec_c& tRes ) {
		if ( iChunk == iBID )
			tRes.Add ( pMerged );
		return ( iChunk == iAID || iChunk == iBID );
	} ) )
		return false;
	sphLogDebug ( "optimized a=%s, b=%s, new=%s, killed=%d", pA->Cidx().GetFilebase(), pB->Cidx().GetFilebase(), tMerged.GetFilebase(), iKilled );
	if ( sLog )
		sLog->SetSprintf ("%s and %s to %s", pA->Cidx ().GetFilebase(), pB->Cidx ().GetFilebase (), tMerged.GetFilebase ());
	pA->m_bFinallyUnlink = true;
	pB->m_bFinallyUnlink = true;
	pMerged->m_bFinallyUnlink = false;
	SaveMeta();
	Preread();
	if ( pAffected )
		++*pAffected;
	return true;
}
// Signal all optimize jobs to stop and (when inside a coroutine) wait until they finish.
// Returns the previous value of the stop flag.
bool RtIndex_c::StopOptimize()
{
	auto bPrevOptimizeValue = m_bOptimizeStop.exchange ( true, std::memory_order_relaxed );
	std::atomic_thread_fence ( std::memory_order_release ); // to be sure we go to Wait() _after_ m_bOptimizeStop is set to true.
	if ( Threads::IsInsideCoroutine() )
		m_tOptimizeRuns.Wait ( [] ( int i ) { return i <= 0; } );
	return bPrevOptimizeValue;
}
// Validate a chunk reference from an optimize task. With bByOrder the value is an
// ordinal index and is converted in place to a chunk ID. Rejects unknown chunks and
// chunks already busy in another optimize operation.
bool RtIndex_c::CheckValidateChunk ( int& iChunk, int iChunks, bool bByOrder ) const
{
	if ( bByOrder )
	{
		if ( iChunk >= iChunks || iChunk < 0 )
		{
			sphWarning ( "rt: table %s: Optimize step: provided chunk %d is out of range [0..%d]", GetName(), iChunk, iChunks - 1 );
			return false;
		}
		iChunk = ChunkIDByChunkIdx ( iChunk ); // note: rewrites the caller's value from index to ID
	}
	auto pChunk = m_tRtChunks.DiskChunkByID ( iChunk );
	if ( !pChunk )
	{
		sphWarning ( "rt: table %s: Optimize step: provided chunk ID %d is invalid", GetName(), iChunk );
		return false;
	}
	if ( pChunk->m_bOptimizing.load ( std::memory_order_relaxed ) )
	{
		sphWarning ( "rt: table %s: Optimize step: provided chunk %d is now occupied in another optimize operation", GetName(), iChunk );
		return false;
	}
	return true;
}
// Validate (and normalize) the chunk references in an optimize task. After this call
// m_iFrom/m_iTo always hold chunk IDs, never ordinals (m_bByOrder is cleared).
bool RtIndex_c::CheckValidateOptimizeParams ( OptimizeTask_t& tTask ) const
{
	auto iChunks = m_tRtChunks.GetDiskChunksCount();
	switch ( tTask.m_eVerb )
	{
	case OptimizeTask_t::eMerge:
		if ( !CheckValidateChunk ( tTask.m_iTo, iChunks, tTask.m_bByOrder ) )
			return false;
	// no break; check also m_iFrom param then (intentional fallthrough)
	case OptimizeTask_t::eDrop:
	case OptimizeTask_t::eCompress:
	case OptimizeTask_t::eSplit:
	case OptimizeTask_t::eDedup:
		if ( !CheckValidateChunk ( tTask.m_iFrom, iChunks, tTask.m_bByOrder ) )
			return false;
	default: break;
	}
	tTask.m_bByOrder=false;
	return true;
}
// Launch an optimize task on a background job. Refused (returns false) while any
// save lock is held; returns true once the job is scheduled (not when it finishes).
bool RtIndex_c::StartOptimize ( OptimizeTask_t tTask )
{
	if ( GetNumOfLocks ()>0 )
		return false;
	Threads::StartJob ( [tTask = std::move ( tTask ), this] () {
		// want to track optimize only at work
		auto pDesc = PublishSystemInfo ( "OPTIMIZE" );
		Optimize ( std::move ( tTask ) );
	} );
	return true;
}
// Number of optimize passes currently in flight for this table.
int RtIndex_c::OptimizesRunning() const noexcept
{
	return m_tOptimizeRuns.GetValue ();
}
// Number of outstanding save locks (non-zero blocks starting a new optimize).
int RtIndex_c::GetNumOfLocks () const noexcept
{
	return m_tSaving.GetNumOfLocks();
}
// Top-level optimize entry: validate the task, make sure only one optimize runs at
// a time for this table, dispatch to CommonOptimize(), and log the outcome.
void RtIndex_c::Optimize ( OptimizeTask_t tTask )
{
	TRACE_CORO ( "rt", "RtIndex_c::Optimize" );
	RTDLOG << "Optimize invoked with " << tTask;
	if ( !CheckValidateOptimizeParams ( tTask ) )
		return;
	RTDLOG << "Optimize checked with " << tTask;
	// best-effort single-run guard: bail out if another optimize is already in flight
	if ( m_tOptimizeRuns.GetValue() > 0 )
	{
		RTDLOG << "Escape optimize as " << m_tOptimizeRuns.GetValue() << " tasks is already running";
		sphLogDebug ( "Escape optimize as %d tasks is already running", m_tOptimizeRuns.GetValue() );
		return;
	}
	if ( !MergeCanRun() )
		return;
	sphLogDebug ( "rt optimize: table %s: optimization started", GetName() );
	int64_t tmStart = sphMicroTimer();
	m_tOptimizeRuns.ModifyValue ( [] ( int& i ) { ++i; } );
	auto iChunks = CommonOptimize ( std::move ( tTask ) );
	m_tOptimizeRuns.ModifyValueAndNotifyAll ( [] ( int& i ) { --i; } ); // wakes StopOptimize() waiters
	int64_t tmPass = sphMicroTimer() - tmStart;
	int iDiskChunks = m_tRtChunks.GetDiskChunksCount();
	if ( sphInterrupted() )
		LogWarning ( "rt: table %s: optimization terminated chunk(s) %d ( left %d ) in %.3t", GetName(), iChunks, iDiskChunks, tmPass );
	else if ( iChunks > 0 )
		LogInfo ( "rt: table %s: optimized %s chunk(s) %d ( left %d ) in %.3t", GetName(), g_bProgressiveMerge ? "progressive" : "regular", iChunks, iDiskChunks, tmPass );
}
// True while merging may proceed: no daemon shutdown and no explicit stop request.
bool RtIndex_c::MergeCanRun() const
{
	return !sphInterrupted() && !m_bOptimizeStop.load(std::memory_order_relaxed);
}
// Classic (non-progressive) optimize: repeatedly merge the first two chunks
// until one chunk is left, a merge fails, or a stop is requested.
// Returns the number of affected chunks.
int RtIndex_c::ClassicOptimize ()
{
	TRACE_SCHED ( "rt", "RtIndex_c::ClassicOptimize" );
	RTDLOG << "Start ClassicOptimize()";
	int iAffected = 0;
	bool bWork = true;
	while ( bWork && m_tRtChunks.GetDiskChunksCount() >= 2 )
		bWork &= MergeCanRun() && MergeTwoChunks ( ChunkIDByChunkIdx ( 0 ), ChunkIDByChunkIdx ( 1 ), &iAffected );
	return iAffected;
}
// Resolve the effective optimize cutoff: per-table override when set,
// otherwise the global default.
static int GetCutOff ( const MutableIndexSettings_c & tSettings )
{
	return tSettings.IsSet ( MutableName_e::OPTIMIZE_CUTOFF )
		? tSettings.m_iOptimizeCutoff
		: MutableIndexSettings_c::GetDefaults().m_iOptimizeCutoff;
}
// Progressive optimize: while more than iCutoff chunks remain idle, pick the two
// smallest chunks and merge them (empty chunks are just dropped); then do a light
// pass dropping fully-killed chunks. Returns the number of affected chunks.
int RtIndex_c::ProgressiveOptimize ( int iCutoff )
{
	TRACE_CORO ( "rt", "RtIndex_c::ProgressiveOptimize" );
	int iAffected = 0;
	if ( !iCutoff )
		iCutoff = GetCutOff ( m_tMutableSettings );
	bool bWork = true;
	while ( bWork &= MergeCanRun() )
	{
		auto pChunks = m_tRtChunks.DiskChunks();
		// count only idle chunks against the cutoff; busy ones are someone else's problem
		if ( ( pChunks->GetLength() - GetNumOfOptimizingNow ( *pChunks ) ) <= iCutoff )
			break;
		auto tmStart = sphMicroTimer();
		// merge 'smallest' to 'smaller' and get 'merged' that names like 'A'+.tmp
		// however 'merged' got placed at 'B' position and 'merged' renamed to 'B' name
		auto chA = GetNextSmallestChunkByID ( *pChunks, -1 );
		if ( !chA.m_iSize ) // empty chunk - just remove
		{
			RTDLOG << "Optimize: drop chunk " << chA.m_iId;
			DropDiskChunk ( chA.m_iId, &iAffected );
			continue;
		}
		auto chB = GetNextSmallestChunkByID ( *pChunks, chA.m_iId );
		if ( chA.m_iId < 0 || chB.m_iId < 0 )
		{
			// sphWarning ( "Couldn't find smallest chunk" );
			break;
		}
		// we need to make sure that A is the oldest one
		// indexes go from oldest to newest so A must go before B (A is always older than B)
		// this is not required by bitmap killlists, but by some other stuff (like ALTER RECONFIGURE)
		if ( chA.m_iId > chB.m_iId )
			Swap ( chB, chA );
		RTDLOG << "Optimize: merge chunks " << chA.m_iId << " and " << chB.m_iId;
		CSphString sLog;
		bWork &= MergeTwoChunks ( chA.m_iId, chB.m_iId, &iAffected, &sLog );
		auto tmPass = sphMicroTimer() - tmStart;
		LogInfo ( "rt: table %s: merged chunks %s in %t (progressive mode). Remaining chunk count: %d", GetName (), sLog.cstr(), tmPass, m_tRtChunks.GetDiskChunksCount () );
	}
	RTDLOG << "Optimize: start compressing pass for the rest of " << m_tRtChunks.GetDiskChunksCount() << " chunks.";
	// light optimize (drop totally killed chunks) in the rest of the chunks
	for ( int i = 0; bWork && i < m_tRtChunks.GetDiskChunksCount(); ++i )
	{
		auto pVictim = m_tRtChunks.DiskChunkByIdx ( i );
		const CSphIndex& tVictim = pVictim->Cidx();
		SkipOrDrop ( tVictim.m_iChunk, tVictim, false, &iAffected );
	}
	return iAffected;
}
// Entry point for all OPTIMIZE flavours. 'Single' manual verbs
// (merge/drop/compress/dedup/split) are executed directly; everything else
// runs a full optimize - progressive or classic, depending on configuration.
// Returns the number of affected chunks.
int RtIndex_c::CommonOptimize ( OptimizeTask_t tTask )
{
	TRACE_CORO ( "rt", "RtIndex_c::CommonOptimize" );
	bool bProgressive = g_bProgressiveMerge;
	int iChunks = 0;
	switch ( tTask.m_eVerb ) // process all 'single' manual commands
	{
	case OptimizeTask_t::eMerge: MergeTwoChunks ( tTask.m_iFrom, tTask.m_iTo, &iChunks ); return iChunks;
	case OptimizeTask_t::eDrop: DropDiskChunk ( tTask.m_iFrom, &iChunks ); return iChunks;
	case OptimizeTask_t::eCompress: CompressOneChunk ( tTask.m_iFrom, &iChunks ); return iChunks;
	case OptimizeTask_t::eDedup: DedupOneChunk ( tTask.m_iFrom, &iChunks ); return iChunks;
	case OptimizeTask_t::eSplit: SplitOneChunk ( tTask.m_iFrom, tTask.m_sUvarFilter.cstr(), &iChunks ); return iChunks;
	case OptimizeTask_t::eAutoOptimize:
		// auto-optimize always uses the progressive strategy;
		// intentional fall-through to the common exit below
		bProgressive = true;
	default:
		break;
	}
	return bProgressive ? ProgressiveOptimize( tTask.m_iCutoff ) : ClassicOptimize();
}
// Kick off a background auto-optimize when the number of disk chunks exceeds
// the configured threshold (cutoff multiplier times the per-index cutoff).
// A zero multiplier disables auto-optimize entirely.
void RtIndex_c::CheckStartAutoOptimize()
{
	TRACE_SCHED ( "rt", "CheckStartAutoOptimize" );
	int iMultiplier = AutoOptimizeCutoffMultiplier();
	RTLOGV << "CheckStartAutoOptimize with cutoff=" << iMultiplier;
	if ( !iMultiplier )
		return;

	int iCutoff = iMultiplier * GetCutOff ( m_tMutableSettings );
	if ( m_tRtChunks.GetDiskChunksCount()<=iCutoff )
		return;

	OptimizeTask_t tTask;
	tTask.m_eVerb = OptimizeTask_t::eAutoOptimize;
	tTask.m_iCutoff = iCutoff;
	RTDLOG << "StartOptimize for " << GetName () << ", auto-optimize with cutoff " << iCutoff;
	StartOptimize ( std::move ( tTask ) );
}
//////////////////////////////////////////////////////////////////////////
// STATUS
//////////////////////////////////////////////////////////////////////////
// Fill pRes with a full resource accounting for this RT index:
// RAM segments, header files on disk, and aggregated per-disk-chunk stats.
void RtIndex_c::GetStatus ( CSphIndexStatus * pRes ) const
{
	assert ( pRes );
	if ( !pRes )
		return;

	auto tGuard = RtGuard();

	// RAM segments: live RAM plus per-segment bookkeeping overhead
	int64_t iUsedRam = SegmentsGetUsedRam ( tGuard.m_dRamSegs );
	pRes->m_iDead = SegmentsGetDeadRows ( tGuard.m_dRamSegs );
	pRes->m_iRamChunkSize = iUsedRam + tGuard.m_dRamSegs.GetLength()*int(sizeof(RtSegment_t));
	pRes->m_iRamUse = sizeof( RtIndex_c ) + pRes->m_iRamChunkSize;
	// 'retired' = RAM still allocated to the chunk but not used by live segments
	pRes->m_iRamRetired = m_iRamChunksAllocatedRAM.load(std::memory_order_relaxed) - iUsedRam;
	pRes->m_iMemLimit = m_iRtMemLimit;
	pRes->m_fSaveRateLimit = m_fSaveRateLimit;

	// on-disk footprint of the RT header files themselves
	CSphString sError;
	for ( const char * szExt : { "meta", "ram" } )
	{
		CSphAutofile fdRT ( GetFilename ( szExt ), SPH_O_READ, sError );
		int64_t iFileSize = fdRT.GetSize();
		if ( iFileSize>0 )
			pRes->m_iDiskUse += iFileSize; // that uses disk, but not occupies
	}

	// accumulate stats from every disk chunk
	CSphIndexStatus tDisk;
	for ( const auto& pChunk : tGuard.m_dDiskChunks )
	{
		pChunk->Cidx().GetStatus ( &tDisk );
		pRes->m_iRamUse += tDisk.m_iRamUse;
		pRes->m_iDiskUse += tDisk.m_iDiskUse;
		pRes->m_iMapped += tDisk.m_iMapped;
		pRes->m_iMappedResident += tDisk.m_iMappedResident;
		pRes->m_iMappedDocs += tDisk.m_iMappedDocs;
		pRes->m_iMappedResidentDocs += tDisk.m_iMappedResidentDocs;
		pRes->m_iMappedHits += tDisk.m_iMappedHits;
		pRes->m_iMappedResidentHits += tDisk.m_iMappedResidentHits;
		pRes->m_iDead += tDisk.m_iDead;
	}

	pRes->m_iNumRamChunks = tGuard.m_dRamSegs.GetLength();
	pRes->m_iNumChunks = tGuard.m_dDiskChunks.GetLength();
	pRes->m_iTID = m_iTID;
	pRes->m_iSavedTID = m_iSavedTID;
	pRes->m_iLockCount = GetNumOfLocks();
	pRes->m_iOptimizesCount = OptimizesRunning();
	// sphWarning ( "Chunks: %d, RAM: %d, DISK: %d", pRes->m_iNumChunks, (int) pRes->m_iRamUse, (int) pRes->m_iDiskUse );
}
//////////////////////////////////////////////////////////////////////////
// RECONFIGURE
//////////////////////////////////////////////////////////////////////////
// Compare the incoming reconfigure settings against the current index state
// and, when anything differs, build a ready-to-apply tSetup.
// Return contract (note the inversion): returns FALSE when a reconfigure is
// needed (tSetup filled), TRUE when settings are effectively the same.
// NOTE(review): tokenizer/dict/field-filter creation failures also return
// true, with sError set - callers apparently must check sError; confirm.
bool CreateReconfigure ( const CSphString & sIndexName, bool bIsStarDict, const ISphFieldFilter * pFieldFilter,
	const CSphIndexSettings & tIndexSettings, uint64_t uTokHash, uint64_t uDictHash, int iMaxCodepointLength, int64_t iMemLimit,
	bool bSame, CSphReconfigureSettings & tSettings, CSphReconfigureSetup & tSetup, StrVec_t & dWarnings, CSphString & sError )
{
	CreateFilenameBuilder_fn fnCreateFilenameBuilder = GetIndexFilenameBuilder();
	std::unique_ptr<FilenameBuilder_i> pFilenameBuilder;
	if ( fnCreateFilenameBuilder )
		pFilenameBuilder = fnCreateFilenameBuilder ( sIndexName.cstr() );

	// FIXME!!! check missed embedded files
	// tokenizer is created first - the dictionary below needs it
	TokenizerRefPtr_c pTokenizer = Tokenizer::Create ( tSettings.m_tTokenizer, nullptr, pFilenameBuilder.get(), dWarnings, sError );
	if ( !pTokenizer )
	{
		sError.SetSprintf ( "'%s' failed to create tokenizer, error '%s'", sIndexName.cstr(), sError.cstr() );
		return true;
	}

	// dict setup second
	DictRefPtr_c tDict { sphCreateDictionaryCRC ( tSettings.m_tDict, nullptr, pTokenizer, sIndexName.cstr(), false, tIndexSettings.m_iSkiplistBlockSize, pFilenameBuilder.get(), sError ) };
	if ( !tDict )
	{
		sError.SetSprintf ( "'%s' failed to create dictionary, error '%s'", sIndexName.cstr(), sError.cstr() );
		return true;
	}

	// multiforms right after dict
	Tokenizer::AddToMultiformFilterTo ( pTokenizer, tDict->GetMultiWordforms() );

	// bigram filter: tokenize the configured bigram word list into a sorted vector
	if ( tSettings.m_tIndex.m_eBigramIndex!=SPH_BIGRAM_NONE && tSettings.m_tIndex.m_eBigramIndex!=SPH_BIGRAM_ALL )
	{
		pTokenizer->SetBuffer ( (BYTE*)const_cast<char*> ( tSettings.m_tIndex.m_sBigramWords.cstr() ), tSettings.m_tIndex.m_sBigramWords.Length() );
		BYTE * pTok = nullptr;
		while ( ( pTok = pTokenizer->GetToken() )!=nullptr )
			tSettings.m_tIndex.m_dBigramWords.Add() = (const char*)pTok;
		tSettings.m_tIndex.m_dBigramWords.Sort();
	}

	// index_exact_words is meaningful only with morphology/wordforms;
	// it is also forced for word dicts with morphology and star syntax
	bool bNeedExact = ( tDict->HasMorphology() || tDict->GetWordformsFileInfos().GetLength() );
	if ( tSettings.m_tIndex.m_bIndexExactWords && !bNeedExact )
		tSettings.m_tIndex.m_bIndexExactWords = false;
	if ( tDict->GetSettings().m_bWordDict && tDict->HasMorphology() && bIsStarDict && !tSettings.m_tIndex.m_bIndexExactWords )
		tSettings.m_tIndex.m_bIndexExactWords = true;

	// re filter: compare current vs new regexp sets by order-independent FNV hash
	bool bReFilterSame = true;
	CSphFieldFilterSettings tFieldFilterSettings;
	if ( pFieldFilter )
		pFieldFilter->GetSettings ( tFieldFilterSettings );
	if ( tFieldFilterSettings.m_dRegexps.GetLength()!=tSettings.m_tFieldFilter.m_dRegexps.GetLength() )
	{
		bReFilterSame = false;
	} else
	{
		CSphVector<uint64_t> dFieldFilter;
		ARRAY_FOREACH ( i, tFieldFilterSettings.m_dRegexps )
			dFieldFilter.Add ( sphFNV64 ( tFieldFilterSettings.m_dRegexps[i].cstr() ) );
		dFieldFilter.Uniq();
		uint64_t uMyFF = sphFNV64 ( dFieldFilter.Begin(), sizeof(dFieldFilter[0]) * dFieldFilter.GetLength() );

		dFieldFilter.Resize ( 0 );
		ARRAY_FOREACH ( i, tSettings.m_tFieldFilter.m_dRegexps )
			dFieldFilter.Add ( sphFNV64 ( tSettings.m_tFieldFilter.m_dRegexps[i].cstr() ) );
		dFieldFilter.Uniq();
		uint64_t uNewFF = sphFNV64 ( dFieldFilter.Begin(), sizeof(dFieldFilter[0]) * dFieldFilter.GetLength() );

		bReFilterSame = ( uMyFF==uNewFF );
	}

	// field filter: rebuild only when the regexp set changed
	std::unique_ptr<ISphFieldFilter> tFieldFilter;
	if ( !bReFilterSame && tSettings.m_tFieldFilter.m_dRegexps.GetLength () )
	{
		tFieldFilter = sphCreateRegexpFilter ( tSettings.m_tFieldFilter, sError );
		if ( !tFieldFilter )
		{
			sError.SetSprintf ( "'%s' failed to create field filter, error '%s'", sIndexName.cstr (), sError.cstr () );
			return true;
		}
	}

	// icu filter: respawn only if the preprocessor setting changed
	bool bIcuSame = ( tIndexSettings.m_ePreprocessor==tSettings.m_tIndex.m_ePreprocessor );
	if ( !bIcuSame )
	{
		if ( !sphSpawnFilterICU ( tFieldFilter, tSettings.m_tIndex, tSettings.m_tTokenizer, sIndexName.cstr(), sError ) )
		{
			sError.SetSprintf ( "'%s' failed to create field filter, error '%s'", sIndexName.cstr (), sError.cstr () );
			return true;
		}
	}

	// compare options: any difference means tSetup must be filled and applied
	if ( !bSame || uTokHash!=pTokenizer->GetSettingsFNV() || uDictHash!=tDict->GetSettingsFNV() ||
		iMaxCodepointLength!=pTokenizer->GetMaxCodepointLength() || sphGetSettingsFNV ( tIndexSettings )!=sphGetSettingsFNV ( tSettings.m_tIndex ) ||
		!bReFilterSame || !bIcuSame || tSettings.m_tMutableSettings.HasSettings() )
	{
		tSetup.m_pTokenizer = pTokenizer.Leak();
		tSetup.m_pDict = tDict.Leak();
		tSetup.m_tIndex = tSettings.m_tIndex;
		tSetup.m_pFieldFilter = std::move ( tFieldFilter );
		tSetup.m_tMutableSettings = tSettings.m_tMutableSettings;
		return false;
	}
	return true;
}
// Compare incoming settings against the current ones. Fills tSetup and
// returns false when a reconfigure is actually required; true when same.
bool RtIndex_c::IsSameSettings ( CSphReconfigureSettings & tSettings, CSphReconfigureSetup & tSetup, StrVec_t & dWarnings, CSphString & sError ) const
{
	// schema comparison is done by hash; a mismatch schedules a schema change
	bool bSchemaSame = true;
	if ( tSettings.m_bChangeSchema && m_uSchemaHash!=SchemaFNV ( tSettings.m_tSchema ) )
	{
		tSetup.m_tSchema = tSettings.m_tSchema;
		tSetup.m_bChangeSchema = true;
		bSchemaSame = false;
	}

	// the rest of the comparison (tokenizer, dict, filters, etc.) is shared
	return CreateReconfigure ( GetName(), IsStarDict ( m_bKeywordDict ), m_pFieldFilter.get(), m_tSettings, m_pTokenizer->GetSettingsFNV(), m_pDict->GetSettingsFNV(), m_pTokenizer->GetMaxCodepointLength(),
		GetMemLimit(), bSchemaSame, tSettings, tSetup, dWarnings, sError );
}
// Apply a prepared reconfigure setup: flush the RAM chunk to disk first, then
// swap in the new schema/settings/tokenizer/dict/filters. The call order here
// is significant (e.g. tokenizer must be set before SetupQueryTokenizer()).
bool RtIndex_c::Reconfigure ( CSphReconfigureSetup & tSetup )
{
	// strength single-fiber access (don't rely upon to upstream w-lock)
	ScopedScheduler_c tSerialFiber ( m_tWorkers.SerialChunkAccess() );
	TRACE_SCHED ( "rt", "Reconfigure" );

	// everything currently in RAM must become a disk chunk before we switch
	if ( !ForceDiskChunk() )
		return false;

	if ( tSetup.m_bChangeSchema )
		SetSchema ( tSetup.m_tSchema );

	if ( tSetup.m_tMutableSettings.HasSettings() )
	{
		m_tMutableSettings.Combine ( tSetup.m_tMutableSettings );
		SetMemLimit ( m_tMutableSettings.m_iMemLimit );
	}

	// binlog setting may flip during Setup(); remember both states to resync TIDs below
	bool bWasBinlog = m_tSettings.m_bBinlog;
	Setup ( tSetup.m_tIndex );
	bool bNewBinlog = m_tSettings.m_bBinlog;

	SetTokenizer ( tSetup.m_pTokenizer );
	SetDictionary ( tSetup.m_pDict );
	SetFieldFilter ( std::move ( tSetup.m_pFieldFilter ) );

	m_iMaxCodepointLength = m_pTokenizer->GetMaxCodepointLength();
	SetupQueryTokenizer();

	// FIXME!!! handle error
	m_pTokenizerIndexing = m_pTokenizer->Clone ( SPH_CLONE_INDEX );
	Tokenizer::AddBigramFilterTo ( m_pTokenizerIndexing, m_tSettings.m_eBigramIndex, m_tSettings.m_sBigramWords, m_sLastError );

	AlterSave ( false );
	RaiseAlterGeneration();

	if ( bWasBinlog != bNewBinlog )
	{
		// AlterSave just performed flush
		m_iSavedTID = m_iTID = bNewBinlog ? Binlog::LastTidFor ( GetName () ) : -1;
		SaveMeta ();
	}

	return true;
}
// RT indexes manage deletions internally; external kills are not supported.
int RtIndex_c::Kill ( DocID_t tDocID )
{
	assert ( false && "No external kills for RT");
	return 0;
}
// RT indexes manage deletions internally; external bulk kills are not supported.
int RtIndex_c::KillMulti ( const VecTraits_T<DocID_t> & /*dKlist*/ )
{
	assert ( false && "No external kills for RT");
	return 0;
}
// Liveness checks via the external kill interface are not supported for RT.
bool RtIndex_c::IsAlive ( DocID_t tDocID ) const
{
	assert ( false && "No external kills for RT");
	return false;
}
// FNV64 fingerprint of index settings, used to detect settings changes
// (see the comparison in CreateReconfigure). The set and order of hashed
// fields must stay stable, otherwise unchanged settings would hash differently.
uint64_t sphGetSettingsFNV ( const CSphIndexSettings & tSettings )
{
	uint64_t uHash = 0;

	// pack all boolean toggles into one flags word
	DWORD uFlags = 0;
	if ( tSettings.m_bHtmlStrip )
		uFlags |= 1<<1;
	if ( tSettings.m_bIndexExactWords )
		uFlags |= 1<<2;
	if ( tSettings.m_bIndexFieldLens )
		uFlags |= 1<<3;
	if ( tSettings.m_bIndexSP )
		uFlags |= 1<<4;
	if ( tSettings.m_bBinlog )
		uFlags |= 1<<5;
	uHash = sphFNV64 ( &uFlags, sizeof(uFlags), uHash );

	int iMinPrefixLen = tSettings.RawMinPrefixLen();
	uHash = sphFNV64 ( &tSettings.m_eHitFormat, sizeof(tSettings.m_eHitFormat), uHash );
	uHash = sphFNV64 ( tSettings.m_sHtmlIndexAttrs.cstr(), tSettings.m_sHtmlIndexAttrs.Length(), uHash );
	uHash = sphFNV64 ( tSettings.m_sHtmlRemoveElements.cstr(), tSettings.m_sHtmlRemoveElements.Length(), uHash );
	uHash = sphFNV64 ( tSettings.m_sZones.cstr(), tSettings.m_sZones.Length(), uHash );
	uHash = sphFNV64 ( &tSettings.m_eHitless, sizeof(tSettings.m_eHitless), uHash );
	uHash = sphFNV64 ( tSettings.m_sHitlessFiles.cstr(), tSettings.m_sHitlessFiles.Length(), uHash );
	uHash = sphFNV64 ( &tSettings.m_eBigramIndex, sizeof(tSettings.m_eBigramIndex), uHash );
	uHash = sphFNV64 ( tSettings.m_sBigramWords.cstr(), tSettings.m_sBigramWords.Length(), uHash );
	uHash = sphFNV64 ( &tSettings.m_uAotFilterMask, sizeof(tSettings.m_uAotFilterMask), uHash );
	uHash = sphFNV64 ( &tSettings.m_ePreprocessor, sizeof(tSettings.m_ePreprocessor), uHash );
	uHash = sphFNV64 ( tSettings.m_sIndexTokenFilter.cstr(), tSettings.m_sIndexTokenFilter.Length(), uHash );
	uHash = sphFNV64 ( &iMinPrefixLen, sizeof(iMinPrefixLen), uHash );
	uHash = sphFNV64 ( &tSettings.m_iMinInfixLen, sizeof(tSettings.m_iMinInfixLen), uHash );
	uHash = sphFNV64 ( &tSettings.m_iMaxSubstringLen, sizeof(tSettings.m_iMaxSubstringLen), uHash );
	uHash = sphFNV64 ( &tSettings.m_iBoundaryStep, sizeof(tSettings.m_iBoundaryStep), uHash );
	uHash = sphFNV64 ( &tSettings.m_iOvershortStep, sizeof(tSettings.m_iOvershortStep), uHash );
	uHash = sphFNV64 ( &tSettings.m_iStopwordStep, sizeof(tSettings.m_iStopwordStep), uHash );

	return uHash;
}
// Collect all files belonging to this index: RT header files, optional
// mutable settings, external (tokenizer/dict) files, and every disk chunk's
// files. dExt is deduplicated since chunks may share external files.
void RtIndex_c::GetIndexFiles ( StrVec_t& dFiles, StrVec_t& dExt, const FilenameBuilder_i* pParentFilenameBuilder ) const
{
	// add a header file only if it actually exists and is readable
	auto fnAddFile = [this, &dFiles] ( const auto tExt ) {
		auto sFile = GetFilename ( tExt );
		if ( sphIsReadable ( sFile ) )
			dFiles.Add ( std::move ( sFile ) );
	};

	fnAddFile ( "meta" );
	fnAddFile ( "ram" );
	if ( m_tMutableSettings.NeedSave() ) // should be file already after post-setup
		fnAddFile ( SPH_EXT_SETTINGS );

	// create a filename builder only when the caller did not provide one
	std::unique_ptr<FilenameBuilder_i> pFilenameBuilder { nullptr };
	if ( !pParentFilenameBuilder && GetIndexFilenameBuilder() )
	{
		pFilenameBuilder = GetIndexFilenameBuilder() ( GetName() );
		pParentFilenameBuilder = pFilenameBuilder.get();
	}
	GetSettingsFiles ( m_pTokenizer, m_pDict, GetSettings(), pParentFilenameBuilder, dExt );

	RtGuard().m_dDiskChunks.for_each ( [&] ( ConstDiskChunkRefPtr_t& p ) { p->Cidx().GetIndexFiles ( dFiles, dExt, pParentFilenameBuilder ); } );
	dExt.Uniq(); // might be duplicates of tok \ dict files from disk chunks
}
// Forbid further saves: stop background optimize and switch the saver into
// DISCARD state. The release fence publishes the state change to other threads.
void RtIndex_c::ProhibitSave()
{
	StopOptimize();
	m_tSaving.SetState ( SaveState_c::DISCARD );
	std::atomic_thread_fence ( std::memory_order_release );
}
// Re-enable saves and background optimize after ProhibitSave()/LockFileState().
// The release fence publishes the state change to other threads.
void RtIndex_c::EnableSave()
{
	m_tSaving.SetState ( SaveState_c::ENABLED );
	m_bOptimizeStop.store ( false, std::memory_order_relaxed );
	std::atomic_thread_fence ( std::memory_order_release );
}
// fixme! Review, if it still necessary, as SST locks everything itself.
// Freeze the on-disk state: stop optimize, flush the RAM chunk and attributes,
// disable further saves, then return the complete file list in dFiles.
void RtIndex_c::LockFileState ( CSphVector<CSphString>& dFiles )
{
	StopOptimize();
	ForceRamFlush ( "forced" );
	CSphString sError;
	SaveAttributes ( sError ); // fixme! report error, better discard whole locking
	// that will ensure, if current txn is applying, it will be finished (especially kill pass) before we continue.
	assert ( Threads::IsInsideCoroutine());
	ScopedScheduler_c tSerialFiber ( m_tWorkers.SerialChunkAccess() );
	m_tSaving.SetState ( SaveState_c::DISABLED );
	std::atomic_thread_fence ( std::memory_order_release );

	GetIndexFiles ( dFiles, dFiles );
}
// Create per-session docstore readers. The RT docstore itself works without
// buffered readers, but every disk chunk keeps one per session.
void RtIndex_c::CreateReader ( int64_t iSessionId ) const
{
	auto tGuard = RtGuard();
	for ( const auto & pChunk : tGuard.m_dDiskChunks )
	{
		assert ( pChunk );
		pChunk->Cidx().CreateReader ( iSessionId );
	}
}
// Fetch a document from the docstore by docid. RAM segments are checked first
// (skipping rows killed via the dead row map), then disk chunks.
// Returns false when the docid is not found alive anywhere.
bool RtIndex_c::GetDoc ( DocstoreDoc_t & tDoc, DocID_t tDocID, const VecTraits_T<int> * pFieldIds, int64_t iSessionId, bool bPack ) const
{
	auto tGuard = RtGuard();
	for ( const auto & i : tGuard.m_dRamSegs )
	{
		assert ( i && i->m_pDocstore );
		RowID_t tRowID = i->GetRowidByDocid(tDocID);
		if ( tRowID==INVALID_ROWID || i->m_tDeadRowMap.IsSet(tRowID) )
			continue; // not in this segment, or killed there

		tDoc = i->m_pDocstore->GetDoc ( tRowID, pFieldIds, iSessionId, bPack );
		return true;
	}

	for ( const auto & i : tGuard.m_dDiskChunks )
	{
		assert(i);
		if ( i->Cidx().GetDoc ( tDoc, tDocID, pFieldIds, iSessionId, bPack ) )
			return true;
	}

	return false;
}
// Resolve a stored field/attr name to its docstore field id;
// -1 when no docstore is configured or the name is unknown.
int RtIndex_c::GetFieldId ( const CSphString & sName, DocstoreDataType_e eType ) const
{
	if ( !m_pDocstoreFields )
		return -1;
	return m_pDocstoreFields->GetFieldId ( sName, eType );
}
// Build an EXPLAIN plan for the given query text: clone the tokenizer/dict
// with star/exact decorations as configured, then delegate to Explain().
Bson_t RtIndex_c::ExplainQuery ( const CSphString & sQuery ) const
{
	ExplainQueryArgs_t tArgs;
	tArgs.m_szQuery = sQuery.cstr();
	tArgs.m_pSchema = &GetMatchSchema();

	// query-time tokenizer with star/exact syntax support
	TokenizerRefPtr_c pQueryTokenizer { sphCloneAndSetupQueryTokenizer ( m_pTokenizer, IsStarDict ( m_bKeywordDict ), m_tSettings.m_bIndexExactWords, false ) };
	SetupStarTokenizer ( pQueryTokenizer );
	SetupExactTokenizer ( pQueryTokenizer );

	// matching stateless dict decorated the same way
	tArgs.m_pDict = GetStatelessDict ( m_pDict );
	SetupStarDictV8 ( tArgs.m_pDict );
	SetupExactDict ( tArgs.m_pDict );
	if ( m_pFieldFilter )
		tArgs.m_pFieldFilter = m_pFieldFilter->Clone();
	tArgs.m_pSettings = &m_tSettings;
	tArgs.m_pWordlist = this;
	tArgs.m_pQueryTokenizer = pQueryTokenizer;
	tArgs.m_iExpandKeywords = m_tMutableSettings.m_iExpandKeywords;
	tArgs.m_iExpansionLimit = m_iExpansionLimit;
	tArgs.m_bExpandPrefix = ( m_pDict->GetSettings().m_bWordDict && IsStarDict ( m_bKeywordDict ) );
	tArgs.m_pMorphFields = &m_tMorphFields;

	auto tGuard = RtGuard();
	tArgs.m_pIndexData = tGuard.m_tSegmentsAndChunks.m_pSegs;

	return Explain ( tArgs );
}
// Word ids need to be stored only when hitless mode is 'some'
// and a non-empty hitless word list is actually loaded.
bool RtIndex_c::NeedStoreWordID () const
{
	if ( m_tSettings.m_eHitless!=SPH_HITLESS_SOME )
		return false;
	return m_dHitlessWords.GetLength()!=0;
}
//////////////////////////////////////////////////////////////////////////
// Factory: create an RT index instance, accounting its memory under MEM_INDEX_RT.
std::unique_ptr<RtIndex_i> sphCreateIndexRT ( CSphString sIndexName, CSphString sPath, CSphSchema tSchema, int64_t iRamSize, bool bKeywordDict )
{
	MEMORY ( MEM_INDEX_RT );
	auto pIndex = std::make_unique<RtIndex_c> ( std::move ( sIndexName ), std::move ( sPath ), std::move ( tSchema ), iRamSize, bKeywordDict );
	return pIndex;
}
// Global RT subsystem init: set up the binlog and read shared config options.
void sphRTInit ( CSphString sBinlogPath, bool bCommonBinlog, const CSphConfigSection * pCommon )
{
	Binlog::Init ( std::move ( sBinlogPath ) );
	Binlog::SetCommon ( bCommonBinlog );

	if ( pCommon )
		g_bProgressiveMerge = pCommon->GetBool ( "progressive_merge", true ); // progressive merge is on by default
}
// Test-mode flag; when set, some validation is relaxed
// (e.g. sphRTSchemaConfigure allows schemas without attributes).
static bool g_bTestMode = false;

void sphRTSetTestMode ()
{
	g_bTestMode = true;
}
// Mark the column as columnar when the settings list it by name or contain
// the '*' wildcard. JSON attributes can not be columnar and are skipped.
static void SetColumnarFlag ( CSphColumnInfo & tCol, const CSphIndexSettings & tSettings )
{
	if ( tCol.m_eAttrType==SPH_ATTR_JSON )
		return;

	bool bWildcard = false;
	for ( const auto & sAttr : tSettings.m_dColumnarAttrs )
		if ( sAttr=="*" )
		{
			bWildcard = true;
			break;
		}

	if ( bWildcard )
	{
		tCol.m_uAttrFlags |= CSphColumnInfo::ATTR_COLUMNAR;
		return;
	}

	for ( const auto & sAttr : tSettings.m_dColumnarAttrs )
		if ( sAttr==tCol.m_sName )
		{
			tCol.m_uAttrFlags |= CSphColumnInfo::ATTR_COLUMNAR;
			break;
		}
}
// Mark the column as KNN-indexed when a KNN setting with a matching name exists.
static void SetKNNFlag ( CSphColumnInfo & tCol, const CSphIndexSettings & tSettings )
{
	for ( const auto & tKNN : tSettings.m_dKNN )
	{
		if ( !( tKNN.m_sName==tCol.m_sName ) )
			continue;
		tCol.m_uAttrFlags |= CSphColumnInfo::ATTR_INDEXED_KNN;
		break;
	}
}
// Build an RT schema from the 'rt_field' / 'rt_attr_*' config directives.
// Adds the implicit docid (and blob locator when needed), validates field
// count and field/attribute shadowing, applies columnar/KNN flags (unless bPQ).
// Returns false with sError set on a fatal configuration problem.
// NOTE(review): bitcount problems below are written into sError yet the
// function still returns true, and pWarnings is not used for them here -
// confirm whether they should go into pWarnings instead.
bool sphRTSchemaConfigure ( const CSphConfigSection & hIndex, CSphSchema & tSchema, const CSphIndexSettings & tSettings, StrVec_t * pWarnings, CSphString & sError, bool bSkipValidation, bool bPQ )
{
	// fields
	SmallStringHash_T<BYTE> hFields;
	for ( CSphVariant * v=hIndex("rt_field"); v; v=v->m_pNext )
	{
		CSphString sFieldName = v->cstr();
		sFieldName.ToLower();
		tSchema.AddField ( sFieldName.cstr() );
		hFields.Add ( 1, sFieldName ); // remember names to detect attr/field shadowing below
	}

	if ( tSchema.GetFieldsCount()>SPH_MAX_FIELDS )
	{
		sError.SetSprintf ( "too many fields (fields=%d, max=%d)", tSchema.GetFieldsCount(), SPH_MAX_FIELDS );
		return false;
	}

	// add id column
	CSphColumnInfo tDocIdCol ( sphGetDocidName() );
	tDocIdCol.m_eAttrType = SPH_ATTR_BIGINT;
	if ( !bPQ )
		SetColumnarFlag ( tDocIdCol, tSettings );
	tSchema.AddAttr ( tDocIdCol, false );

	// attrs
	constexpr int iNumTypes = 10;
	const char * sTypes[iNumTypes] = { "rt_attr_uint", "rt_attr_bigint", "rt_attr_timestamp", "rt_attr_bool", "rt_attr_float", "rt_attr_string", "rt_attr_json", "rt_attr_multi", "rt_attr_multi_64", "rt_attr_float_vector" };
	const ESphAttr iTypes[iNumTypes] = { SPH_ATTR_INTEGER, SPH_ATTR_BIGINT, SPH_ATTR_TIMESTAMP, SPH_ATTR_BOOL, SPH_ATTR_FLOAT, SPH_ATTR_STRING, SPH_ATTR_JSON, SPH_ATTR_UINT32SET, SPH_ATTR_INT64SET, SPH_ATTR_FLOAT_VECTOR };

	// collect all attrs first; the config tag preserves declaration order
	CSphVector<std::pair<int, CSphColumnInfo>> dOrderedColumns;
	for ( int iType = 0; iType < iNumTypes; ++iType )
	{
		for ( CSphVariant * v = hIndex ( sTypes[iType] ); v; v = v->m_pNext )
		{
			StrVec_t dNameParts;
			sphSplit ( dNameParts, v->cstr(), ":");
			CSphColumnInfo tCol ( dNameParts[0].cstr(), iTypes[iType]);
			tCol.m_sName.ToLower();

			// ignore doc id, it was added via create table to pass id attribute settings
			if ( tCol.m_sName==sphGetDocidName() )
				continue;

			// bitcount ("name:bits" syntax, integers only)
			tCol.m_tLocator = CSphAttrLocator();
			if ( dNameParts.GetLength ()>1 )
			{
				if ( tCol.m_eAttrType==SPH_ATTR_INTEGER )
				{
					auto iBits = strtol ( dNameParts[1].cstr(), nullptr, 10 );
					if ( iBits>0 && iBits<=ROWITEM_BITS )
						tCol.m_tLocator.m_iBitCount = (int)iBits;
					else
						sError.SetSprintf ( "attribute '%s': invalid bitcount=%d (bitcount ignored)", tCol.m_sName.cstr(), (int)iBits );
				} else
					sError.SetSprintf ( "attribute '%s': bitcount is only supported for integer types (bitcount ignored)", tCol.m_sName.cstr() );
			}

			dOrderedColumns.Add ( { v->m_iTag, tCol } );
		}
	}

	// restore declaration order before adding to the schema
	dOrderedColumns.Sort ( Lesser ( [] ( const auto& a, const auto& b ) { return a.first < b.first; } ) );

	for ( auto& tOrderedCol : dOrderedColumns )
	{
		auto& tCol = tOrderedCol.second;
		if ( !SchemaConfigureCheckAttribute ( tSchema, tCol, sError ) )
			return false;

		if ( !bPQ )
		{
			SetColumnarFlag ( tCol, tSettings );
			SetKNNFlag ( tCol, tSettings );
		}

		tSchema.AddAttr ( tCol, false );

		// non-string attrs may not share a name with a fulltext field
		if ( tCol.m_eAttrType != SPH_ATTR_STRING && hFields.Exists ( tCol.m_sName ) && !bSkipValidation )
		{
			sError.SetSprintf ( "can not add attribute that shadows '%s' field", tCol.m_sName.cstr() );
			return false;
		}
	}

	// add blob attr locator
	if ( tSchema.HasBlobAttrs() )
	{
		CSphColumnInfo tBlobLocCol ( sphGetBlobLocatorName() );
		tBlobLocCol.m_eAttrType = SPH_ATTR_BIGINT;

		// should be right after docid
		tSchema.InsertAttr ( 1, tBlobLocCol, false );

		// rebuild locators in the schema (add+remove of a temp attr forces a refresh)
		const char * szTmpColName = "$_tmp";
		CSphColumnInfo tTmpCol ( szTmpColName, SPH_ATTR_BIGINT );
		tSchema.AddAttr ( tTmpCol, false );
		tSchema.RemoveAttr ( szTmpColName, false );
	}

	// attribute-less schemas are allowed only in test mode or when validation is skipped
	if ( !tSchema.GetAttrsCount() && !g_bTestMode && !bSkipValidation )
	{
		sError.SetSprintf ( "no attribute configured (use rt_attr directive)" );
		return false;
	}

	return true;
}
// Install a new schema: refresh the row stride and schema hash, (re)create the
// docstore field map when anything is stored, and reset field length stats.
void RtIndex_c::SetSchema ( CSphSchema tSchema )
{
	m_tSchema = std::move ( tSchema );
	m_iStride = m_tSchema.GetRowSize();
	m_uSchemaHash = SchemaFNV ( m_tSchema );

	// docstore fields are needed only when something is actually stored
	if ( m_tSchema.HasStoredFields() || m_tSchema.HasStoredAttrs() )
	{
		m_pDocstoreFields = CreateDocstoreFields();
		SetupDocstoreFields ( *m_pDocstoreFields.get(), m_tSchema );
	}

	// a schema change invalidates all collected field length statistics
	for ( int i = 0; i < m_dFieldLens.GetLength(); ++i )
	{
		m_dFieldLens[i] = 0;
		m_dFieldLensRam[i] = 0;
		m_dFieldLensDisk[i] = 0;
	}
}
// FNV64 fingerprint of a schema: attribute names, types and locators plus
// fulltext field names and flags. Used to detect schema changes cheaply
// (see RtIndex_c::IsSameSettings); the iteration order must stay stable.
uint64_t SchemaFNV ( const ISphSchema & tSchema )
{
	uint64_t uHash = SPH_FNV64_SEED;

	// attrs
	int iAttrsCount = tSchema.GetAttrsCount();
	for ( int i=0; i<iAttrsCount; i++ )
	{
		const CSphColumnInfo & tAttr = tSchema.GetAttr ( i );
		uHash = sphFNV64cont ( tAttr.m_sName.cstr(), uHash );
		uHash = sphFNV64 ( &tAttr.m_eAttrType, sizeof( tAttr.m_eAttrType ), uHash );
		uHash = tAttr.m_tLocator.FNV ( uHash );
	}

	// fulltext fields
	int iFieldsCount = tSchema.GetFieldsCount();
	for ( int i=0; i<iFieldsCount; i++ )
	{
		const CSphColumnInfo & tField = tSchema.GetField ( i );
		uHash = sphFNV64cont ( tField.m_sName.cstr(), uHash );
		uHash = sphFNV64 ( &tField.m_uFieldFlags, sizeof( tField.m_uFieldFlags ), uHash );
	}

	return uHash;
}
// Set the hard RAM chunk limit; the soft limit is derived from it
// by scaling with the adaptive save rate limit.
void RtIndex_c::SetMemLimit ( int64_t iMemLimit )
{
	m_iRtMemLimit = iMemLimit;
	m_iSoftRamLimit = iMemLimit * m_fSaveRateLimit;
}
// Adapt the save rate limit after a RAM chunk save, based on the ratio of
// saved bytes to total (saved + freshly inserted) bytes. Emergent saves
// additionally push the limit down by a fixed step. The result is clamped
// to [MIN_SAVE_RATE_LIMIT, MAX_SAVE_RATE_LIMIT] and the soft RAM limit is
// recomputed from it.
void RtIndex_c::RecalculateRateLimit ( int64_t iSaved, int64_t iInserted, bool bEmergent )
{
	if ( ( iSaved + iInserted ) > 0 )
	{
		auto fRate = (double)iSaved / ( iSaved + iInserted );
		if ( bEmergent ) // emergent save happened
		{
			m_fSaveRateLimit -= SAVE_RATE_LIMIT_EMERGENCY_STEP;
			m_fSaveRateLimit = Min ( m_fSaveRateLimit, fRate );
		} else
			m_fSaveRateLimit = fRate;
	}

	// clamp to the allowed range
	m_fSaveRateLimit = Min ( MAX_SAVE_RATE_LIMIT, m_fSaveRateLimit );
	m_fSaveRateLimit = Max ( MIN_SAVE_RATE_LIMIT, m_fSaveRateLimit );
	m_iSoftRamLimit = m_iRtMemLimit * m_fSaveRateLimit;
	TRACE_COUNTER ( "mem", perfetto::CounterTrack ( "Ratio", "%" ), m_fSaveRateLimit );
}
// Rebuild secondary indexes on every disk chunk; stops at the first failure.
bool RtIndex_c::AlterSI ( CSphString & sError )
{
	// strength single-fiber access (don't rely upon to upstream w-lock)
	ScopedScheduler_c tSerialFiber ( m_tWorkers.SerialChunkAccess() );
	TRACE_SCHED ( "rt", "alter-si" );

	auto pDiskChunks = m_tRtChunks.DiskChunks();
	for ( auto & pChunk : *pDiskChunks )
		if ( !pChunk->CastIdx().AlterSI ( sError ) )
			return false;

	RaiseAlterGeneration();
	return true;
}
// Store the global IDF file path and propagate it to every disk chunk.
void RtIndex_c::SetGlobalIDFPath ( const CSphString & sPath )
{
	m_sGlobalIDFPath = sPath;

	auto pDiskChunks = m_tRtChunks.DiskChunks();
	for ( auto & pDiskChunk : *pDiskChunks )
		pDiskChunk->CastIdx().SetGlobalIDFPath ( m_sGlobalIDFPath );
}
// Dump the dictionary (keyword, docs, hits) as CSV: first the RAM segments,
// then every disk chunk. Only dict=keywords is supported.
void RtIndex_c::DebugDumpDict ( FILE * fp, bool bDumpOnly )
{
	if ( !m_bKeywordDict )
		sphDie ( "DebugDumpDict() only supports dict=keywords for now" );

	if ( !bDumpOnly )
		fprintf ( fp, "keyword,docs,hits,offset\n" );

	auto tGuard = RtGuard();
	for ( const auto & pSeg : tGuard.m_dRamSegs )
	{
		RtWordReader_c tRdWord ( pSeg, m_bKeywordDict, m_iWordsCheckpoint, m_tSettings.m_eHitless );
		while ( tRdWord.UnzipWord() )
			fprintf ( fp, "%s,%u,%u,0\n", tRdWord->m_sWord, tRdWord->m_uDocs, tRdWord->m_uHits ); // RAM words carry no offset

	}

	// disk chunks append rows only (header already printed above)
	for ( auto & tDiskChunk : tGuard.m_dDiskChunks )
		tDiskChunk->CastIdx().DebugDumpDict ( fp, true );
}
| 354,906
|
C++
|
.cpp
| 8,746
| 37.793391
| 455
| 0.709102
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,953
|
sorterprecalc.cpp
|
manticoresoftware_manticoresearch/src/sorterprecalc.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "sorterprecalc.h"
#include "sortertraits.h"
// Sorter name reported in iterator descriptions (AddDesc) for precalc sorters.
static const char * GetPrecalcSorterName()
{
	return "Precalc";
}
// Common base for 'precalc' sorters that produce exactly one synthetic match
// from a precalculated value (count / count distinct) instead of scanning.
class FastBaseSorter_c : public MatchSorter_c, ISphNoncopyable, protected BaseGroupSorter_c
{
public:
			FastBaseSorter_c ( const CSphGroupSorterSettings & tSettings ) : BaseGroupSorter_c ( tSettings ) {}

	bool	IsGroupby () const final { return true; }
	bool	CanBeCloned() const final { return false; }	// single-shot sorter, not clonable for multi-threaded use
	void	SetMerge ( bool bMerge ) final {}
	void	Finalize ( MatchProcessor_i & tProcessor, bool, bool bFinalizeMatches ) final { if ( GetLength() ) tProcessor.Process ( &m_tData ); }
	int		GetLength() final { return m_bDataInitialized ? 1 : 0; }	// at most one match ever
	ISphMatchSorter * Clone() const final { return nullptr; }
	void	MoveTo ( ISphMatchSorter * pRhs, bool bCopyMeta ) final { assert ( 0 && "Not supported"); }
	bool	IsPrecalc() const final { return true; }
	int		Flatten ( CSphMatch * pTo ) final;

protected:
	CSphMatch	m_tData;					// the single precalculated match
	bool		m_bDataInitialized = false;	// set once the first match is pushed
};
// Hand the single precalculated match over to the caller and reset the state.
int FastBaseSorter_c::Flatten ( CSphMatch * pTo )
{
	assert ( m_bDataInitialized );
	Swap ( *pTo, m_tData );
	m_bDataInitialized = false;
	m_iTotal = 0;
	return 1;	// exactly one match is ever produced
}
// fast count distinct sorter
// works by using precalculated count distinct taken from secondary indexes
class FastCountDistinctSorter_c final : public FastBaseSorter_c
{
public:
			FastCountDistinctSorter_c ( int iCountDistinct, const CSphString & sAttr, const CSphGroupSorterSettings & tSettings );

	bool	Push ( const CSphMatch & tEntry ) final	{ return PushEx(tEntry); }
	void	Push ( const VecTraits_T<const CSphMatch> & dMatches ) final { assert ( 0 && "Not supported in grouping"); }
	bool	PushGrouped ( const CSphMatch & tEntry, bool ) final { return PushEx(tEntry); }
	void	AddDesc ( CSphVector<IteratorDesc_t> & dDesc ) const final { dDesc.Add ( { m_sAttr, GetPrecalcSorterName() } ); }

private:
	int			m_iCountDistinct = 0;	// precalculated COUNT(DISTINCT attr)
	CSphString	m_sAttr;				// attribute name, reported via AddDesc

	bool	PushEx ( const CSphMatch & tEntry );
};
// iCountDistinct is the precalculated COUNT(DISTINCT sAttr) value to report.
FastCountDistinctSorter_c::FastCountDistinctSorter_c ( int iCountDistinct, const CSphString & sAttr, const CSphGroupSorterSettings & tSettings )
	: FastBaseSorter_c ( tSettings )
	, m_iCountDistinct ( iCountDistinct )
	, m_sAttr ( sAttr )
{}
// Capture the first pushed match and fill it with the precalculated
// distinct count; all subsequent pushes are no-ops.
FORCE_INLINE bool FastCountDistinctSorter_c::PushEx ( const CSphMatch & tEntry )
{
	if ( m_bDataInitialized )
		return true; // always return true, otherwise in RT indexes we won't be able to hit cutoff in disk chunks after the first one

	m_pSchema->CloneMatch ( m_tData, tEntry );
	m_tData.SetAttr ( m_tLocGroupby, 1 );	// fake group number
	m_tData.SetAttr ( m_tLocCount, 1 );		// a single synthetic group
	m_tData.SetAttr ( m_tLocDistinct, m_iCountDistinct );	// precalculated distinct count
	m_bDataInitialized = true;
	m_iTotal++;
	return true;
}
// fast count sorter
// works by using precalculated count taken from secondary indexes
class FastCountFilterSorter_c final : public FastBaseSorter_c
{
public:
			FastCountFilterSorter_c ( int iCount, const CSphString & sAttr, const CSphGroupSorterSettings & tSettings );

	bool	Push ( const CSphMatch & tEntry ) final { return PushEx(tEntry); }
	void	Push ( const VecTraits_T<const CSphMatch> & dMatches ) final { assert ( 0 && "Not supported in grouping"); }
	bool	PushGrouped ( const CSphMatch & tEntry, bool ) final { return PushEx(tEntry); }
	void	AddDesc ( CSphVector<IteratorDesc_t> & dDesc ) const final { dDesc.Add ( { m_sAttr, GetPrecalcSorterName() } ); }

private:
	int			m_iCount = 0;	// precalculated COUNT(*) value
	CSphString	m_sAttr;		// attribute name, reported via AddDesc

	bool	PushEx ( const CSphMatch & tEntry );
};
// iCount is the precalculated match count to report for sAttr.
FastCountFilterSorter_c::FastCountFilterSorter_c ( int iCount, const CSphString & sAttr, const CSphGroupSorterSettings & tSettings )
	: FastBaseSorter_c ( tSettings )
	, m_iCount ( iCount )
	, m_sAttr ( sAttr )
{}
// Capture the first pushed match and fill it with the precalculated count;
// all subsequent pushes are no-ops.
FORCE_INLINE bool FastCountFilterSorter_c::PushEx ( const CSphMatch & tEntry )
{
	if ( m_bDataInitialized )
		return true; // always return true, otherwise in RT indexes we won't be able to hit cutoff in disk chunks after the first one

	m_pSchema->CloneMatch ( m_tData, tEntry );
	m_tData.SetAttr ( m_tLocGroupby, 1 ); // fake group number
	// note: the original code first wrote 1 into m_tLocCount and then
	// immediately overwrote it with m_iCount; the dead store was removed
	m_tData.SetAttr ( m_tLocCount, m_iCount );
	m_bDataInitialized = true;
	m_iTotal++;
	return true;
}
///////////////////////////////////////////////////////////////////////////////
// Create a precalc sorter from precalculated results, if any are available.
// Priority order matters: count distinct first, then filtered count, then
// plain count. Returns nullptr when nothing was precalculated.
ISphMatchSorter * CreatePrecalcSorter ( const PrecalculatedSorterResults_t & tPrecalc, const CSphGroupSorterSettings & tSettings )
{
	if ( tPrecalc.m_iCountDistinct!=-1 )
		return new FastCountDistinctSorter_c ( tPrecalc.m_iCountDistinct, tPrecalc.m_sAttr, tSettings );

	if ( tPrecalc.m_iCountFilter!=-1 )
		return new FastCountFilterSorter_c ( tPrecalc.m_iCountFilter, tPrecalc.m_sAttr, tSettings );

	if ( tPrecalc.m_iCount!=-1 )
		return new FastCountFilterSorter_c ( tPrecalc.m_iCount, "count(*)", tSettings );

	return nullptr;
}
| 5,331
|
C++
|
.cpp
| 114
| 44.807018
| 144
| 0.725449
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,954
|
frontendschema.cpp
|
manticoresoftware_manticoresearch/src/frontendschema.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "frontendschema.h"
#include "queuecreator.h"
/// returns internal magic names for expressions like COUNT(*) that have a corresponding one
/// returns expression itself otherwise
static const char * GetMagicSchemaName ( const CSphString & s )
{
	if ( s=="weight()" )
		return "@weight";
	if ( s=="groupby()" )
		return "@groupby";
	if ( s=="count(*)" )
		return "@count";
	return s.cstr();
}
/// a functor to sort columns by (is_aggregate ASC, column_index ASC)
struct AggregateColumnSort_fn
{
	// true for aggregate / internal grouping columns
	bool IsAggr ( const CSphColumnInfo & c ) const
	{
		if ( c.m_eAggrFunc!=SPH_AGGR_NONE )
			return true;
		if ( c.m_sName=="@groupby" || c.m_sName=="@count" || c.m_sName=="@distinct" )
			return true;
		return IsSortJsonInternal ( c.m_sName );
	}

	bool IsLess ( const CSphColumnInfo & a, const CSphColumnInfo & b ) const
	{
		bool bAggrA = IsAggr(a);
		bool bAggrB = IsAggr(b);
		if ( bAggrA!=bAggrB )
			return bAggrA < bAggrB;		// non-aggregates come first
		return a.m_iIndex < b.m_iIndex;	// then keep original column order
	}
};
///////////////////////////////////////////////////////////////////////////////
// builds the client-facing (frontend) schema out of the raw result-set schema
FrontendSchemaBuilder_c::FrontendSchemaBuilder_c ( const AggrResult_t & tRes, const CSphQuery & tQuery, const CSphVector<CSphQueryItem> & dItems, const CSphVector<CSphQueryItem> & dQueryItems, const sph::StringSet & hExtraColumns, bool bQueryFromAPI, bool bHaveLocals )
	: m_tRes ( tRes )
	, m_tQuery ( tQuery )
	, m_dItems ( dItems )
	, m_dQueryItems ( dQueryItems )
	, m_hExtraColumns ( hExtraColumns )
	, m_bQueryFromAPI ( bQueryFromAPI )
	, m_bHaveLocals ( bHaveLocals )
	, m_bAgent ( tQuery.m_bAgent )
{
	// one frontend column slot per select-list item; populated later by Build()
	m_dFrontend.Resize(dItems.GetLength());
}
// run the full pipeline: map select items onto result-set schema columns, verify the
// mapping, resolve locators and apply group-by/facet remaps; false on schema mismatch
bool FrontendSchemaBuilder_c::Build ( bool bMaster, CSphString & sError )
{
	CollectKnownItems();
	AddAttrs();
	AddNullMask();

	// every select item must be resolved by now (except implicit "id")
	if ( !CheckUnmapped(sError) )
		return false;

	Finalize();

	RemapGroupBy();

	// agent should provide raw attributes into master without any remapping
	if ( bMaster )
		RemapFacets();

	return true;
}
// split select-list items into the ones that directly match a result-set schema
// column by bare name (known) and the ones that need further resolution (unmapped)
void FrontendSchemaBuilder_c::CollectKnownItems()
{
	ARRAY_CONSTFOREACH ( i, m_dItems )
	{
		const CSphQueryItem & tItem = m_dItems[i];

		// only a plain, unaliased SQL item can be resolved by its bare expression name
		int iSchemaId = ( !m_bQueryFromAPI && tItem.m_sAlias.IsEmpty() )
			? m_tRes.m_tSchema.GetAttrIndex ( tItem.m_sExpr.cstr() )
			: -1;

		if ( iSchemaId<0 )
		{
			m_dUnmappedAttrs.Add(i);
			continue;
		}

		m_dKnownAttrs.Add(i);
		m_dFrontend[i].m_sName = tItem.m_sExpr;
		m_dFrontend[i].m_iIndex = iSchemaId;
	}
}
// resolve the still-unmapped select-list items against the result-set schema and,
// in agent mode, append extra columns the master may need to see verbatim
void FrontendSchemaBuilder_c::AddAttrs()
{
	bool bUsualApi = !m_bAgent && m_bQueryFromAPI;

	for ( int iCol=0; iCol<m_tRes.m_tSchema.GetAttrsCount(); ++iCol )
	{
		const CSphColumnInfo & tCol = m_tRes.m_tSchema.GetAttr(iCol);
		assert ( !tCol.m_sName.IsEmpty() );
		// "magic" = internal service columns (@groupby etc) and internal sort attributes
		bool bMagic = IsGroupbyMagic ( tCol.m_sName ) || IsSortStringInternal ( tCol.m_sName );

		if ( !bMagic && tCol.m_pExpr )
		{
			// expression column: match unmapped select items by alias
			ARRAY_FOREACH ( j, m_dUnmappedAttrs )
				if ( m_dItems[ m_dUnmappedAttrs[j] ].m_sAlias==tCol.m_sName )
				{
					int k = m_dUnmappedAttrs[j];
					m_dFrontend[k].m_iIndex = iCol;
					m_dFrontend[k].m_sName = m_dItems[k].m_sAlias;
					m_dKnownAttrs.Add(k);
					m_dUnmappedAttrs.Remove ( j-- ); // do not skip an element next to removed one!
				}

			// FIXME?
			// really not sure if this is the right thing to do
			// but it fixes a couple queries in test_163 in compaitbility mode
			if ( m_bAgent && !m_dFrontend.Contains ( bind ( &CSphColumnInfo::m_sName ), tCol.m_sName ) )
			{
				CSphColumnInfo & t = m_dFrontend.Add();
				t.m_iIndex = iCol;
				t.m_sName = tCol.m_sName;
			}
		} else if ( bMagic && ( tCol.m_pExpr || bUsualApi ) )
		{
			// magic column: match unmapped select items by the magic schema name of their expression
			ARRAY_FOREACH ( j, m_dUnmappedAttrs )
				if ( tCol.m_sName==GetMagicSchemaName ( m_dItems[ m_dUnmappedAttrs[j] ].m_sExpr ) )
				{
					int k = m_dUnmappedAttrs[j];
					m_dFrontend[k].m_iIndex = iCol;
					m_dFrontend[k].m_sName = m_dItems[k].m_sAlias;
					m_dKnownAttrs.Add(k);
					m_dUnmappedAttrs.Remove ( j-- ); // do not skip an element next to removed one!
				}

			// magic columns are exposed even when not explicitly selected
			if ( !m_dFrontend.Contains ( bind ( &CSphColumnInfo::m_sName ), tCol.m_sName ) )
			{
				CSphColumnInfo & t = m_dFrontend.Add();
				t.m_iIndex = iCol;
				t.m_sName = tCol.m_sName;
			}
		} else
		{
			// plain attribute: match by expression name (when not an aggregate) or by alias
			bool bAdded = false;
			ARRAY_FOREACH ( j, m_dUnmappedAttrs )
			{
				int k = m_dUnmappedAttrs[j];
				const CSphQueryItem & t = m_dItems[k];

				if ( ( tCol.m_sName==GetMagicSchemaName ( t.m_sExpr ) && t.m_eAggrFunc==SPH_AGGR_NONE )
					|| ( t.m_sAlias==tCol.m_sName &&
						( m_tRes.m_tSchema.GetAttrIndex ( GetMagicSchemaName ( t.m_sExpr ) )==-1 || t.m_eAggrFunc!=SPH_AGGR_NONE ) ) )
				{
					// tricky bit about naming
					//
					// in master mode, we can just use the alias or expression or whatever
					// the data will be fetched using the locator anyway, column name does not matter anymore
					//
					// in agent mode, however, we need to keep the original column names in our response
					// otherwise, queries like SELECT col1 c, count(*) c FROM dist will fail on master
					// because it won't be able to identify the count(*) aggregate by its name
					m_dFrontend[k].m_iIndex = iCol;
					m_dFrontend[k].m_sName = m_bAgent
						? tCol.m_sName
						: ( m_dItems[k].m_sAlias.IsEmpty()
							? m_dItems[k].m_sExpr
							: m_dItems[k].m_sAlias );
					m_dKnownAttrs.Add(k);
					bAdded = true;
					m_dUnmappedAttrs.Remove ( j-- ); // do not skip an element next to removed one!
				}
			}

			// column was not found in the select list directly
			// however we might need it anyway because of a non-NULL extra-schema
			// (extra-schema is additional set of columns came from right side of query
			// when you perform 'select a from index order by b', the 'b' is not displayed, but need for sorting,
			// so extra-schema in the case will contain 'b').
			// bMagic condition added for @groupbystr in the agent mode
			if ( !bAdded && m_bAgent && ( m_hExtraColumns[tCol.m_sName] || !m_bHaveLocals || bMagic ) )
			{
				CSphColumnInfo & t = m_dFrontend.Add();
				t.m_iIndex = iCol;
				t.m_sName = tCol.m_sName;
			}
		}
	}

	// BinarySearch() in CheckUnmapped() requires a sorted list
	m_dKnownAttrs.Sort();
}
// expose the hidden null-bitmask attribute of the result set (if present),
// unless it is already listed in the frontend schema
void FrontendSchemaBuilder_c::AddNullMask()
{
	int iNullMaskId = m_tRes.m_tSchema.GetAttrIndex ( GetNullMaskAttrName() );
	if ( iNullMaskId<0 )
		return;

	bool bAlreadyThere = false;
	for ( const auto & tCol : m_dFrontend )
		bAlreadyThere |= ( tCol.m_sName==GetNullMaskAttrName() );

	if ( bAlreadyThere )
		return;

	CSphColumnInfo & tAttr = m_dFrontend.Add();
	tAttr.m_iIndex = iNullMaskId;
	tAttr.m_sName = GetNullMaskAttrName();
}
// verify that every queried select item got resolved into a schema column;
// the only legitimate leftover is the implicit "id" column
bool FrontendSchemaBuilder_c::CheckUnmapped ( CSphString & sError ) const
{
	// sanity check
	assert ( m_dUnmappedAttrs.IsEmpty() || ( m_dUnmappedAttrs.GetLength()==1 && m_dItems [ m_dUnmappedAttrs[0] ].m_sExpr=="id" ) );

	ARRAY_CONSTFOREACH ( i, m_dItems )
	{
		const CSphQueryItem & tItem = m_dItems[i];
		if ( m_dKnownAttrs.BinarySearch(i) || tItem.m_sExpr=="id" )
			continue;

		sError.SetSprintf ( "internal error: column '%s/%s' not found in result set schema", tItem.m_sExpr.cstr(), tItem.m_sAlias.cstr() );
		return false;
	}

	return true;
}
void FrontendSchemaBuilder_c::Finalize()
{
// finalize the frontend schema columns
// we kept indexes into internal schema there, now use them to lookup and copy column data
ARRAY_CONSTFOREACH ( i, m_dFrontend )
{
CSphColumnInfo & tFrontend = m_dFrontend[i];
const CSphColumnInfo & s = m_tRes.m_tSchema.GetAttr ( tFrontend.m_iIndex );
tFrontend.m_tLocator = s.m_tLocator;
tFrontend.m_eAttrType = s.m_eAttrType;
tFrontend.m_eAggrFunc = s.m_eAggrFunc; // for a sort loop just below
tFrontend.m_iIndex = i; // to make the aggr sort loop just below stable
tFrontend.m_uFieldFlags = s.m_uFieldFlags;
}
// tricky bit
// in agents only, push aggregated columns, if any, to the end
// for that, sort the schema by (is_aggregate ASC, column_index ASC)
if ( m_bAgent )
m_dFrontend.Sort ( AggregateColumnSort_fn() );
}
// point groupby() (and its aliases) at the attribute holding the displayable
// group-by key: the internal @groupbystr for json keys, or a string attribute
void FrontendSchemaBuilder_c::RemapGroupBy()
{
	// remap groupby() and aliased groupby() to @groupbystr or string attribute
	const CSphColumnInfo * p = nullptr;
	CSphString sJsonGroupBy;

	// json.field group-by keys live in an internal @groupbystr attribute
	if ( sphJsonNameSplit ( m_tQuery.m_sGroupBy.cstr() ) )
	{
		sJsonGroupBy = SortJsonInternalSet ( m_tQuery.m_sGroupBy );
		p = m_tRes.m_tSchema.GetAttr ( sJsonGroupBy.cstr() );
	}

	if ( !p )
	{
		// try string attribute (multiple group-by still displays hashes)
		if ( !m_tQuery.m_sGroupBy.IsEmpty() )
		{
			p = m_tRes.m_tSchema.GetAttr ( m_tQuery.m_sGroupBy.cstr() );
			if ( p )
			{
				if ( p->m_eAttrType==SPH_ATTR_JSON_PTR )
				{
					// whole json attribute used as the group-by key
					sJsonGroupBy = SortJsonInternalSet ( m_tQuery.m_sGroupBy );
					p = m_tRes.m_tSchema.GetAttr ( sJsonGroupBy.cstr() );
				} else if ( p->m_eAttrType!=SPH_ATTR_STRINGPTR )
				{
					// non-string attributes stay as-is (hash display)
					p = nullptr;
				}
			}
		}

		if ( !p )
			return;
	}

	// patch the groupby() column itself
	for ( auto & tFrontend : m_dFrontend )
		if ( tFrontend.m_sName=="groupby()" )
		{
			tFrontend.m_tLocator = p->m_tLocator;
			tFrontend.m_eAttrType = p->m_eAttrType;
			tFrontend.m_eAggrFunc = p->m_eAggrFunc;
		}

	// check aliases too
	for ( const auto & tQueryItem : m_dQueryItems )
	{
		if ( tQueryItem.m_sExpr!="groupby()" )
			continue;

		for ( auto & tFrontend : m_dFrontend )
			if ( tFrontend.m_sName==tQueryItem.m_sAlias )
			{
				tFrontend.m_tLocator = p->m_tLocator;
				tFrontend.m_eAttrType = p->m_eAttrType;
				tFrontend.m_eAggrFunc = p->m_eAggrFunc;
			}
	}
}
// in facet queries, point the faceted MVA/JSON column at the computed
// @groupby/@groupbystr attribute so clients see the actual group key
void FrontendSchemaBuilder_c::RemapFacets()
{
	// facets
	if ( !m_tQuery.m_bFacet && !m_tQuery.m_bFacetHead )
		return;

	// remap MVA/JSON column to @groupby/@groupbystr in facet queries
	const CSphColumnInfo * pGroupByCol = nullptr;
	CSphString sJsonGroupBy;

	// json.field keys prefer the internal @groupbystr attribute
	if ( sphJsonNameSplit ( m_tQuery.m_sGroupBy.cstr() ) )
	{
		sJsonGroupBy = SortJsonInternalSet ( m_tQuery.m_sGroupBy );
		pGroupByCol = m_tRes.m_tSchema.GetAttr ( sJsonGroupBy.cstr() );
	}

	if ( !pGroupByCol )
	{
		pGroupByCol = m_tRes.m_tSchema.GetAttr ( "@groupby" );
		if ( !pGroupByCol )
			return;
	}

	if ( m_tQuery.m_sGroupBy.IsEmpty() )
		return;

	for ( auto & tFrontend : m_dFrontend )
	{
		ESphAttr eAttr = tFrontend.m_eAttrType;
		// checking _PTR attrs only because we should not have and non-ptr attr at this point
		if ( m_tQuery.m_sGroupBy==tFrontend.m_sName && ( eAttr==SPH_ATTR_UINT32SET_PTR || eAttr==SPH_ATTR_INT64SET_PTR || eAttr==SPH_ATTR_FLOAT_VECTOR_PTR || eAttr==SPH_ATTR_JSON_FIELD_PTR ) )
		{
			tFrontend.m_tLocator = pGroupByCol->m_tLocator;
			tFrontend.m_eAttrType = pGroupByCol->m_eAttrType;
			tFrontend.m_eAggrFunc = pGroupByCol->m_eAggrFunc;
		}
	}
}
| 10,832
|
C++
|
.cpp
| 314
| 31.261146
| 269
| 0.679052
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,955
|
indexfilebase.cpp
|
manticoresoftware_manticoresearch/src/indexfilebase.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "indexfilebase.h"
#include "indexfiles.h"
//////////////////////////////////////////////////////////////////////////
// index filebase + .szSuffix - like idx.meta, idx.foo, etc
CSphString IndexFileBase_c::GetFilename ( const char* szExt ) const
{
	CSphString sName = SphSprintf ( "%s.%s", GetFilebase(), szExt );
	return sName;
}
// index filebase + .num - like idx.1, idx.100, etc.
CSphString IndexFileBase_c::GetFilename ( int iSuffix ) const
{
	CSphString sName = SphSprintf ( "%s.%d", GetFilebase(), iSuffix );
	return sName;
}
// index filebase + .ext - like idx.sph, idx.tmp.spa
CSphString IndexFileBase_c::GetFilename ( ESphExt eExt ) const
{
	CSphString sName = SphSprintf ( "%s%s", GetFilebase(), sphGetExt ( eExt ) );
	return sName;
}
// index filebase + .tmp + .ext - like idx.tmp.sph, idx.tmp.spa
CSphString IndexFileBase_c::GetTmpFilename ( ESphExt eExt ) const
{
	CSphString sName = SphSprintf ( "%s.tmp%s", GetFilebase(), sphGetExt ( eExt ) );
	return sName;
}
| 1,355
|
C++
|
.cpp
| 34
| 38.558824
| 80
| 0.684411
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,956
|
searchdbuddy.cpp
|
manticoresoftware_manticoresearch/src/searchdbuddy.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "searchdtask.h"
#include "netreceive_ql.h"
#include "client_session.h"

#include <algorithm>

#include <boost/asio/io_service.hpp>
#include <boost/asio/read_until.hpp>
#include <boost/process.hpp>
#if _WIN32
#include <boost/winapi/process.hpp>
#endif

#include "replication/portrange.h"
#include "netfetch.h"
#include "searchdbuddy.h"
static std::unique_ptr<boost::process::child> g_pBuddy;		// handle of the spawned buddy process
static CSphString g_sPath;									// resolved command to launch the buddy
static CSphString g_sListener4Buddy;						// daemon listener URL passed to the buddy
static CSphString g_sUrlBuddy;								// URL the buddy reported back on startup
static CSphString g_sStartArgs;								// full command line used to (re)start the buddy

static const int PIPE_BUF_SIZE = 2048;
static std::unique_ptr<boost::asio::io_service> g_pIOS;		// io_service that pumps the buddy stdout pipe
static std::vector<char> g_dPipeBuf ( PIPE_BUF_SIZE );		// raw chunk read from the pipe
static CSphVector<char> g_dLogBuf ( PIPE_BUF_SIZE );		// accumulator for partial (unterminated) log lines
static std::unique_ptr<boost::process::async_pipe> g_pPipe;	// buddy stdout+stderr pipe

// buddy process lifecycle
enum class BuddyState_e
{
	NONE,
	STARTING,
	WORK,
	STOPPED,
	FAILED
};

static BuddyState_e g_eBuddy { BuddyState_e::NONE };
static int g_iRestartCount = 0;			// restart attempts since the last successful start
static int64_t g_tmStarting = 0;		// timestamp (microsec) of the last start attempt
static int g_iTask = 0;					// task manager id of the buddy watchdog job
static const int g_iBuddyLoopSleep = 15;	// watchdog poll interval, msec
static const int g_iRestartMax = 3;		// give up after this many restart attempts
static const int g_iStartMaxTimeout = val_from_env ( "MANTICORE_BUDDY_TIMEOUT", 3 ); // max start timeout 3 sec

static int g_iBuddyVersion = 3;			// buddy protocol version this daemon speaks
static bool g_bBuddyVersion = false;	// set once the buddy version was appended to the status string
extern CSphString g_sStatusVersion;
static CSphString g_sContainerName;		// docker container name (used on Windows)

// windows docker needs port XXX:9999 port mapping
static std::unique_ptr<FreePortList_i> g_pBuddyPortList { nullptr };
ScopedPort_c g_tBuddyPort;

static BuddyState_e TryToStart ( const char * sArgs, CSphString & sError );
static CSphString GetUrl ( const ListenerDesc_t & tDesc );
static CSphString BuddyGetPath ( const CSphString & sPath, const CSphString & sPluginDir, bool bHasBuddyPath, int iHostPort, const CSphString & sDataDir );
static void BuddyStop ();

#if _WIN32
static CSphString g_sBuddyBind = "0.0.0.0:9999"; // It does not matter for docker
#else
static CSphString g_sBuddyBind = "127.0.0.1";
#endif
#if _WIN32
// boost.process launch hook: sets Windows process creation flags for the buddy child
// NOTE(review): presumably CREATE_NEW_CONSOLE detaches the child from the daemon console - confirm intent
struct BuddyWindow_t : boost::process::detail::handler_base
{
	// this function will be invoked at child process constructor before spawning process
	template <class WindowsExecutor>
	void on_setup ( WindowsExecutor & e ) const
	{
		e.creation_flags = boost::winapi::CREATE_NEW_CONSOLE_;
	}
};
#endif
// boost.process launch hook: keeps the daemon's stdout/stderr handles
// inherited by the spawned buddy process
struct PreservedStd_t : boost::process::detail::handler, boost::process::detail::uses_handles
{
	std::vector<int> get_used_handles ()
	{
		return { STDOUT_FILENO, STDERR_FILENO };
	}
};
// parse the buddy greeting line "Buddy <ver> started <addr:port>";
// returns (version, address) on match, a pair of empty strings otherwise
static std::pair<Str_t, Str_t> CheckLine ( Str_t sLine )
{
	CSphVector<Str_t> dTokens;
	sphSplitApply ( sLine.first, sLine.second, " \n\r", [&dTokens] ( const char * szTok, int iTokLen ) { dTokens.Add ( Str_t { szTok, iTokLen } ); } );

	bool bGreeting = dTokens.GetLength()>=4 && StrEq ( dTokens[0], "Buddy" ) && StrEq ( dTokens[2], "started" );
	if ( !bGreeting )
		return std::make_pair ( Str_t(), Str_t() );

	return std::make_pair ( dTokens[1], dTokens[3] );
}
// split tSrc into the first line and the remaining tail;
// the tail length is zeroed when only one meaningful line is present
static std::pair<Str_t, Str_t> GetLines ( Str_t tSrc )
{
	std::pair<Str_t, Str_t> tLines { tSrc, Str_t() };

	char * sStart = const_cast<char *>( tSrc.first );
	if ( !tSrc.second )
	{
		// empty input: NUL-terminate and return as-is
		sStart[tSrc.second] = '\0';
		return tLines;
	}
	const char * sEnd = sStart + tSrc.second - 1;
	char * sCur = sStart;

	// first line: everything up to (and including) the first CR/LF char
	while ( sCur<=sEnd && *sCur!='\n' && *sCur!='\r' )
		sCur++;
	tLines.first = Str_t( tSrc.first, sCur - sStart + 1 );

	// tail starts at the first non-whitespace char after the line break
	while ( sCur<=sEnd && sphIsSpace ( *sCur ) )
		sCur++;
	tLines.second = Str_t( sCur, sEnd - sCur + 1 );

	// check the rest of text for only empty lines
	// NOTE(review): this loop skips NON-space chars, not whitespace - verify the intended condition
	while ( sCur<=sEnd && !sphIsSpace ( *sCur ) )
		sCur++;
	if ( sCur==sEnd )
		tLines.second.second = 0;

	return tLines;
}
// append a partial line (no terminator seen yet) to the global log accumulator
static void AddTail ( Str_t tLine )
{
	char * pDst = g_dLogBuf.AddN ( tLine.second );
	memcpy ( pDst, tLine.first, tLine.second );
}
// true when tLine ends strictly before the end of tBuf,
// i.e. its line terminator is present inside the buffer
static bool HasLineEnd ( Str_t tBuf, Str_t tLine )
{
	const char * pBufEnd = tBuf.first + tBuf.second;
	const char * pLineEnd = tLine.first + tLine.second;
	return pLineEnd<pBufEnd;
}
// emit one buddy output line into the daemon log, chunked to fit the log buffer;
// a previously accumulated partial line (g_dLogBuf) is prepended and flushed
static void DaemonLogBuddyLine ( Str_t tLine )
{
	// if some message already at the buffer - lets copy the tail there and print the whole message from the buffer - not from the message
	if ( g_dLogBuf.GetLength() )
	{
		AddTail ( tLine );
		tLine = g_dLogBuf;
	}

	const int LOG_LINE_HEADER = 60; // daemon adds timestamp, tid then our buddy header
	const int iBufMax = GetDaemonLogBufSize() - LOG_LINE_HEADER;
	// print in chunks that fit into the daemon log line buffer
	while ( tLine.second>0 )
	{
		int iLen = Min ( iBufMax, tLine.second );
		sphInfo ( "[BUDDY] %.*s", iLen, tLine.first );
		tLine.first += iLen;
		tLine.second -= iLen;
	}

	// the accumulated line has been fully printed - reset the accumulator
	if ( g_dLogBuf.GetLength() )
		g_dLogBuf.Resize ( 0 );
}
// forward a raw chunk of buddy pipe output to the daemon log, line by line;
// partial first/last lines are buffered until their terminator arrives
static void LogPipe ( Str_t tSrc )
{
	CSphVector<Str_t> dLines;
	sphSplitApply ( tSrc.first, tSrc.second, "\n\r", [&dLines] ( const char * pLine, int iLen ) { dLines.Add ( Str_t { pLine, iLen } ); } );
	if ( !dLines.GetLength() )
		return;

	// whole pipe buffer without line end - collect into line buffer
	Str_t tLine0 = dLines[0];
	if ( !HasLineEnd ( tSrc, tLine0 ) )
	{
		AddTail ( tLine0 );
		return;
	}
	DaemonLogBuddyLine ( tLine0 );
	if ( dLines.GetLength()==1 )
		return;

	// middle lines are always complete
	for ( int i=1; i<dLines.GetLength()-1; i++ )
		DaemonLogBuddyLine ( dLines[i] );

	Str_t tLineLast = dLines.Last();
	// last line could be without line end - collect into line buffer
	if ( HasLineEnd ( tSrc, tLineLast ) )
	{
		DaemonLogBuddyLine ( tLineLast );
	} else
	{
		AddTail ( tLineLast );
	}
}
// async pipe read handler: during STARTING it parses the buddy greeting
// (version + listen address) and flips the state machine; afterwards it just logs
static void ReadFromPipe ( const boost::system::error_code & tGotCode, std::size_t iSize )
{
	if ( tGotCode.failed() )
		return;
	if ( !iSize )
		return;

	Str_t sLineRef ( g_dPipeBuf.data(), (int)iSize );
	// regular work log all lines from the buddy output
	if ( g_eBuddy!=BuddyState_e::STARTING )
	{
		LogPipe ( sLineRef );
		return;
	}

	auto [sLine, sLinesTail] = GetLines ( sLineRef );
	// at the BuddyState_e::STARTING parsing buddy output
	auto [sBuddyVer, sBuddyAddr] = CheckLine ( sLine );
	if ( IsEmpty ( sBuddyAddr ) )
	{
		g_eBuddy = BuddyState_e::FAILED;
		sphWarning ( "[BUDDY] invalid output, should be 'Buddy ver, started address:port', got '%.*s'", sLineRef.second, sLineRef.first );
		return;
	}

	CSphString sError;
	CSphString sAddr ( sBuddyAddr );
	ListenerDesc_t tListen = ParseListener ( sAddr.cstr(), &sError );
	if ( tListen.m_eProto==Proto_e::UNKNOWN || !sError.IsEmpty() )
	{
		g_eBuddy = BuddyState_e::FAILED;
		sphWarning ( "[BUDDY] invalid output, should be 'Buddy ver, started address:port', got '%.*s', parse error: %s", sLineRef.second, sLineRef.first, sError.cstr() );
		return;
	}

	// buddy really started and ready to serve queries
#ifdef _WIN32
	tListen.m_iPort = g_tBuddyPort;
#endif
	g_sUrlBuddy = GetUrl( tListen );
	g_eBuddy = BuddyState_e::WORK;
	g_iRestartCount = 0;
	sphInfo ( "[BUDDY] started %.*s '%s' at %s", sBuddyVer.second, sBuddyVer.first, g_sStartArgs.cstr(), g_sUrlBuddy.cstr() );
	// whatever followed the greeting goes to the regular log
	if ( sLinesTail.second )
		LogPipe ( sLinesTail );

	// append buddy version to the daemon status string, once
	if ( !g_bBuddyVersion )
	{
		g_bBuddyVersion = true;
		g_sStatusVersion.SetSprintf ( "%s (buddy %.*s)", g_sStatusVersion.cstr(), sBuddyVer.second, sBuddyVer.first );
	}
}
// pipe completion callback: process the chunk, then re-arm the async read
// (stops re-arming once the pipe reports zero bytes or got destroyed)
static void BuddyPipe_fn ( const boost::system::error_code & tGotCode, std::size_t iSize )
{
	ReadFromPipe ( tGotCode, iSize );
	if ( g_pPipe && iSize )
		g_pPipe->async_read_some( boost::asio::buffer ( g_dPipeBuf ), BuddyPipe_fn );
}
// poll the buddy child process; WORK while it is still running, FAILED otherwise
static BuddyState_e BuddyCheckLive()
{
	assert ( g_eBuddy==BuddyState_e::WORK );

	std::error_code tErrorCode;
	bool bAlive = ( g_pBuddy && g_pBuddy->running ( tErrorCode ) );
	if ( bAlive )
		return BuddyState_e::WORK;

	// the current buddy got killed - report and let the caller restart it
	if ( g_pBuddy )
		sphWarning ( "[BUDDY] terminated, exit code %d", tErrorCode.value() );

	return BuddyState_e::FAILED;
}
static void BuddyTryRestart()
{
if ( g_eBuddy!=BuddyState_e::FAILED )
return;
BuddyStop();
g_iRestartCount++;
if ( g_iRestartCount>=g_iRestartMax )
{
sphInfo ( "[BUDDY] restart amount of attempts (%d) has been exceeded", g_iRestartMax );
return;
}
CSphString sErorr;
g_eBuddy = TryToStart ( g_sStartArgs.cstr(), sErorr );
if ( g_eBuddy!=BuddyState_e::STARTING )
{
sphWarning ( "[BUDDY] failed to restart: %s", sErorr.cstr() );
BuddyStop();
} else
{
sphInfo ( "[BUDDY] restarting" );
}
}
// watchdog coroutine: pumps the io_service, enforces the startup timeout,
// detects a dead buddy and restarts it; exits when the buddy is finally down
static void BuddyWorkLoop()
{
	auto pDesc = PublishSystemInfo ( "buddy check" );
	while ( !sphInterrupted() )
	{
		// drain pending pipe events without blocking
		while ( !g_pIOS->stopped() )
		{
			if ( !g_pIOS->poll_one() )
				break;
		}
		g_pIOS->restart();

		// startup took too long - give up
		if ( g_eBuddy==BuddyState_e::STARTING && sphMicroTimer()>( g_tmStarting + g_iStartMaxTimeout * 1000 * 1000 ) )
		{
			sphWarning ( "[BUDDY] failed to start after %d sec", g_iStartMaxTimeout );
			BuddyStop();
			break;
		}

		if ( g_eBuddy==BuddyState_e::WORK )
			g_eBuddy = BuddyCheckLive();

		BuddyTryRestart();

		if ( g_eBuddy==BuddyState_e::STARTING || g_eBuddy==BuddyState_e::WORK )
		{
			Threads::Coro::SleepMsec ( g_iBuddyLoopSleep );
		} else
		{
			break;
		}
	}
}
// (re)spawn the buddy process with its stdout/stderr attached to an async pipe;
// returns STARTING on success (greeting parsed later by the pipe handler), FAILED otherwise
BuddyState_e TryToStart ( const char * sArgs, CSphString & sError )
{
	std::string sCmd = sArgs;
	// tear down any previous child / pipe / io_service before respawning
	g_pBuddy.reset();
	if ( g_pIOS )
		g_pIOS->stop();
	g_pPipe.reset();
	g_pIOS.reset();

	g_pIOS.reset ( new boost::asio::io_service );
	g_pPipe.reset ( new boost::process::async_pipe ( *g_pIOS ) );
	std::unique_ptr<boost::process::child> pBuddy;
	std::error_code tErrorCode;
#if _WIN32
	BuddyWindow_t tWnd;
	pBuddy.reset ( new boost::process::child ( sCmd, ( boost::process::std_out & boost::process::std_err ) > *g_pPipe, tWnd, boost::process::limit_handles, boost::process::error ( tErrorCode ) ) );
#else
	PreservedStd_t tPreserveStd;
	pBuddy.reset ( new boost::process::child ( sCmd, ( boost::process::std_out & boost::process::std_err ) > *g_pPipe, boost::process::limit_handles, boost::process::error ( tErrorCode ) , tPreserveStd ) );
#endif

	if ( !pBuddy->running ( tErrorCode ) )
	{
		sError.SetSprintf ( "'%s' terminated with exit code %d", sArgs, tErrorCode.value() );
		return BuddyState_e::FAILED;
	}

	// remember start time for the watchdog's startup timeout and arm the pipe reader
	g_tmStarting = sphMicroTimer();
	g_pBuddy = std::move ( pBuddy );
	if ( g_pPipe )
		g_pPipe->async_read_some( boost::asio::buffer ( g_dPipeBuf ), BuddyPipe_fn );
	return BuddyState_e::STARTING;
}
// build an http:// URL out of a listener description
CSphString GetUrl ( const ListenerDesc_t & tDesc )
{
	CSphString sURI;

#ifdef _WIN32
	// Use the constant host for Windows
	sURI.SetSprintf ("http://host.docker.internal:%d", tDesc.m_iPort);
#else
	// Original code for other systems
	char sAddrBuf [ SPH_ADDRESS_SIZE ];
	sphFormatIP ( sAddrBuf, sizeof(sAddrBuf), tDesc.m_uIP );
	sURI.SetSprintf ( "http://%s:%d", sAddrBuf, tDesc.m_iPort );
#endif

	return sURI;
}
static void SetContainerName ( const CSphString & sConfigPath )
{
DWORD uName = sphCRC32 ( sConfigPath.cstr() );
g_sContainerName.SetSprintf ( "buddy_%u", uName );
}
// kill the buddy docker container (Windows only; no-op elsewhere)
static void BuddyStopContainer()
{
#ifdef _WIN32
	CSphString sCmd;
	sCmd.SetSprintf ( "docker kill %s", g_sContainerName.cstr() );
	std::error_code tErrorCode;
	// synchronous: wait for "docker kill" to finish before proceeding
	boost::process::child tStop ( sCmd.cstr(), boost::process::limit_handles, boost::process::error ( tErrorCode ) );
	tStop.wait();
#endif
}
// resolve the buddy launch command, spawn the buddy and start the watchdog task;
// silently disables the buddy when prerequisites (listener, curl, path) are missing
void BuddyStart ( const CSphString & sConfigPath, const CSphString & sPluginDir, bool bHasBuddyPath, const VecTraits_T<ListenerDesc_t> & dListeners, bool bTelemetry, int iThreads, const CSphString & sConfigFilePath, const CSphString & sDataDir )
{
	const char* szHelperUrl = getenv ( "MANTICORE_HELPER_URL" );
	if ( szHelperUrl )
	{
		// debug mode - don't start anything and consider env url valid and available
		// - can start any kind of helper externally and just route to it using provided URL
		g_sUrlBuddy = szHelperUrl;
		g_eBuddy = BuddyState_e::WORK;
		return;
	}
	SetContainerName ( sConfigFilePath );

	// should not check buddy related code if buddy disabled at config
	if ( bHasBuddyPath && sConfigPath.IsEmpty() )
		return;

	// pick the first SPHINX/HTTP listener to route buddy traffic through
	ARRAY_FOREACH ( i, dListeners )
	{
		const ListenerDesc_t & tDesc = dListeners[i];
		if ( tDesc.m_eProto==Proto_e::SPHINX || tDesc.m_eProto==Proto_e::HTTP )
		{
#ifdef _WIN32
			g_pBuddyPortList.reset ( PortRange::Create ( "127.0.0.1", tDesc.m_iPort+100, 20 ) );
			g_tBuddyPort = g_pBuddyPortList->AcquirePort();
#endif
			g_sListener4Buddy = GetUrl ( tDesc );
			break;
		}
	}
	if ( g_sListener4Buddy.IsEmpty() )
	{
		sphWarning ( "[BUDDY] no SPHINX or HTTP listeners found, disabled" );
		return;
	}
	if ( !IsCurlAvailable() )
	{
		sphWarning ( "[BUDDY] no curl found, disabled" );
		return;
	}

	CSphString sPath = BuddyGetPath ( sConfigPath, sPluginDir, bHasBuddyPath, (int)g_tBuddyPort, sDataDir );
	if ( sPath.IsEmpty() )
		return;

	// at WINDOWS need to stop docker conteiner that could left from the previous run or after daemon got crashed
	BuddyStopContainer();

	g_dLogBuf.Resize ( 0 );
	g_sPath = sPath;

	g_sStartArgs.SetSprintf ( "%s --listen=%s --bind=%s %s --threads=%d",
		g_sPath.cstr(),
		g_sListener4Buddy.cstr(),
		g_sBuddyBind.cstr(),
		( bTelemetry ? "" : "--disable-telemetry" ),
		iThreads );
	sphLogDebug ( "[BUDDY] start args: %s", g_sStartArgs.cstr() );

	CSphString sErorr;
	BuddyState_e eBuddy = TryToStart ( g_sStartArgs.cstr(), sErorr );
	if ( eBuddy!=BuddyState_e::STARTING )
	{
		sphWarning ( "[BUDDY] failed to start: %s", sErorr.cstr() );
		BuddyStop();
		return;
	}

	// hand lifecycle management over to the watchdog task
	g_eBuddy = eBuddy;
	g_iTask = TaskManager::RegisterGlobal ( "buddy service" );
	assert ( g_iTask>=0 && "failed to create buddy service task" );
	TaskManager::StartJob ( g_iTask, BuddyWorkLoop );
}
// terminate the buddy process (SIGTERM + wait on POSIX, terminate + docker kill
// on Windows) and mark the state machine STOPPED
void BuddyStop ()
{
#if _WIN32
	if ( g_pBuddy )
	{
		std::error_code tErrorCode;
		g_pBuddy->terminate ( tErrorCode );
		if ( tErrorCode )
			sphWarning ( "[BUDDY] stopped, exit code: %d", tErrorCode.value() );
		BuddyStopContainer();
	}
#else
	if ( g_pBuddy )
	{
		// FIXME!!! migrate to boost::process::v2 and use
		// proc.request_exit();
		// proc.wait();
		kill ( g_pBuddy->id(), SIGTERM );
		std::error_code tErrorCode;
		g_pBuddy->wait ( tErrorCode );
		if ( tErrorCode )
			sphLogDebug ( "[BUDDY] stopped, exit code: %d", tErrorCode.value() );
	}
#endif

	g_eBuddy = BuddyState_e::STOPPED;
	g_pBuddy.reset();
}
// full teardown at daemon shutdown: stop the process, then release the Windows port mapping
void BuddyShutdown ()
{
	BuddyStop();
	g_tBuddyPort = ScopedPort_c();
	g_pBuddyPortList.reset();
}
// whether a buddy is up and ready to serve queries
bool HasBuddy()
{
	return g_eBuddy==BuddyState_e::WORK;
}
// extract the HTTP body from the raw daemon reply and attach it as the "body"
// JSON field of the buddy request; returns false when there is no parsable body
static bool BuddyQueryAddErrorBody ( JsonEscapedBuilder & tBuddyQuery, const VecTraits_T<BYTE> & dSrcHttpReply )
{
	if ( !dSrcHttpReply.GetLength() )
		return false;

	const char * sErrorStart = (const char *)dSrcHttpReply.Begin();
	const char * sReplyEnd = sErrorStart + dSrcHttpReply.GetLength();

	// locate the header/body delimiter with a bounded search:
	// the reply buffer is not guaranteed to be NUL-terminated, so strstr could over-read
	const char sDelim[] = "\r\n\r\n";
	const char * sBodyDel = std::search ( sErrorStart, sReplyEnd, sDelim, sDelim+4 );
	if ( sBodyDel==sReplyEnd )
		return false;

	const char * sBodyStart = sBodyDel + 4;
	int iBodyLen = int ( sReplyEnd - sBodyStart );
	if ( iBodyLen<=0 )
		return false;

	Str_t sBodyBuf ( sBodyStart, iBodyLen );
	JsonObj_c tError ( sBodyBuf );
	if ( tError.Empty() )
		return false;

	tBuddyQuery.NamedValNE ( "body", sBodyBuf );
	return true;
}
// serialize the failed query (plus its error and original reply body) into the
// buddy JSON envelope and POST it; returns (success, raw reply text)
static std::pair<bool, CSphString> BuddyQuery ( bool bHttp, Str_t sQueryError, Str_t sPathQuery, Str_t sQuery, http_method eRequestType, const VecTraits_T<BYTE> & dSrcHttpReply )
{
	if ( !HasBuddy() )
		return { false, {} };

	JsonEscapedBuilder tBuddyQuery;
	{
		auto tRoot = tBuddyQuery.Object();
		tBuddyQuery.NamedString ( "type", bHttp ? "unknown json request" : "unknown sql request" );
		{
			// error: { message, body } - body is the original daemon reply, when parsable
			tBuddyQuery.Named ( "error" );
			auto tMessageRoot = tBuddyQuery.Object();
			tBuddyQuery.NamedString ( "message", sQueryError );
			if ( !BuddyQueryAddErrorBody ( tBuddyQuery, dSrcHttpReply ) )
				tBuddyQuery.NamedValNE ( "body", "null" );
		}
		tBuddyQuery.NamedVal ( "version", g_iBuddyVersion );
		if ( !bHttp )
			tBuddyQuery.NamedString ( "user", session::GetClientSession()->m_sUser );
		{
			// message: the original request itself
			tBuddyQuery.Named ( "message" );
			auto tMessageRoot = tBuddyQuery.Object();
			tBuddyQuery.NamedString ( "path_query", sPathQuery );
			tBuddyQuery.NamedString ( "body", sQuery );
			tBuddyQuery.NamedString ( "http_method", ( bHttp ? http_method_str ( eRequestType ) : "" ) );
		}
	}

	StrVec_t dHeaders;
	dHeaders.Add ( SphSprintf ( "Request-ID: %d_%u", session::GetConnID(), sphCRC32 ( sQuery.first, sQuery.second, sphRand() ) ) );
	// disable Expect: 100-continue
	// as Expect: 100-continue header added by curl library do not with the buddy
	dHeaders.Add ( "Expect:" );

	return PostToHelperUrl ( g_sUrlBuddy, (Str_t)tBuddyQuery, dHeaders );
}
bool IsBuddyQuery ( const OptionsHash_t & hOptions )
{
CSphString * pProhibit = hOptions ( "user-agent" );
return pProhibit != nullptr && ( pProhibit->Begins ( "Manticore Buddy" ) );
}
// parsed buddy reply envelope
struct BuddyReply_t
{
	JsonObj_c m_tRoot;				// whole reply document
	CSphString m_sType;				// "json response" / "sql response"
	JsonObj_c m_tMessage;			// payload: either a string or a json object
	int m_iReplyHttpCode = 0;		// optional explicit HTTP status ("error_code" field)
};
// parse and validate the raw buddy reply (json object, compatible protocol
// version, type, message, optional error_code) into tParsed; false + sError on failure
static bool ParseReply ( char * sReplyRaw, BuddyReply_t & tParsed, CSphString & sError )
{
	tParsed.m_tRoot = JsonObj_c ( sReplyRaw );
	if ( !tParsed.m_tRoot )
	{
		sError.SetSprintf ( "unable to parse: %s", tParsed.m_tRoot.GetErrorPtr() );
		return false;
	}

	if ( !tParsed.m_tRoot.IsObj() )
	{
		sError.SetSprintf ( "wrong reply format - not object" );
		return false;
	}

	// protocol version must be in [1 .. g_iBuddyVersion]
	int iVer = 0;
	if ( !tParsed.m_tRoot.FetchIntItem ( iVer, "version", sError, false ) )
		return false;
	if ( iVer>g_iBuddyVersion )
	{
		sError.SetSprintf ( "buddy reply version (%d) greater daemon version (%d), upgrade daemon binary", iVer, g_iBuddyVersion );
		return false;
	}
	if ( iVer<1 )
	{
		sError.SetSprintf ( "wrong buddy reply version (%d), daemon version (%d), upgrade buddy", iVer, g_iBuddyVersion );
		return false;
	}

	if ( !tParsed.m_tRoot.FetchStrItem ( tParsed.m_sType, "type", sError, false ) )
		return false;

	tParsed.m_tMessage = tParsed.m_tRoot.GetItem ( "message" );
	if ( tParsed.m_tMessage.Empty() )
		return false;

	// error_code is optional; absence leaves m_iReplyHttpCode at 0
	if ( !tParsed.m_tRoot.FetchIntItem ( tParsed.m_iReplyHttpCode, "error_code", sError, false ) )
		return false;

	return true;
}
// prefer the explicit HTTP code from the buddy reply;
// fall back to the status of the original request otherwise
static EHTTP_STATUS GetHttpStatusCode ( int iBuddyHttpCode, EHTTP_STATUS eReqHttpCode )
{
	if ( iBuddyHttpCode>0 )
		return HttpGetStatusCodes ( iBuddyHttpCode );
	return eReqHttpCode;
}
// fetch string item sName from tMeta and convert its numeric contents into tVal;
// false when the item is missing, not a string, or not a number
template<typename T>
bool ConvertValue ( const char * sName, const JsonObj_c & tMeta, T & tVal )
{
	JsonObj_c tSrcVal = tMeta.GetItem ( sName );
	if ( !tSrcVal || !tSrcVal.IsStr() )
		return false;

	const char * szVal = tSrcVal.SzVal();
	int64_t iVal = 0;
	double fVal = 0.0;
	ESphJsonType eType;
	if ( !sphJsonStringToNumber ( szVal, strlen ( szVal ), eType, iVal, fVal ) )
		return false;

	tVal = ( eType==JSON_INT64 ) ? (T)iVal : (T)fVal;
	return true;
}
// copy the "meta" object of a buddy reply into the session's last-query meta,
// so that SHOW META after a buddy-served query reports sensible numbers
static void SetSessionMeta ( const JsonObj_c & tBudyyReply )
{
	CSphString sTmpError;
	JsonObj_c tSrcMeta = tBudyyReply.GetObjItem ( "meta", sTmpError, true );
	if ( !tSrcMeta )
		return;

	ClientSession_c * pSession = session::GetClientSession();
	if ( !pSession )
		return;

	auto & tLastMeta = pSession->m_tLastMeta;
	tLastMeta = CSphQueryResultMeta();

	// total => m_iMatches
	ConvertValue ( "total", tSrcMeta, tLastMeta.m_iMatches );
	// total_found => m_iTotalMatches
	ConvertValue ( "total_found", tSrcMeta, tLastMeta.m_iTotalMatches );

	// time => m_iQueryTime \ m_iRealQueryTime
	float fTime = 0.0f;
	if ( ConvertValue ( "time", tSrcMeta, fTime ) )
		tLastMeta.m_iRealQueryTime = tLastMeta.m_iQueryTime = (int)( fTime * 1000.0f );

	// total_relation => m_bTotalMatchesApprox
	CSphString sRel;
	if ( tSrcMeta.FetchStrItem ( sRel, "total_relation", sTmpError, true ) && !sRel.IsEmpty() && sRel=="gte" )
		tLastMeta.m_bTotalMatchesApprox = true;
}
// we call it ALWAYS, because even with absolutely correct result, we still might reject it for '/cli' endpoint if buddy is not available or prohibited
bool ProcessHttpQueryBuddy ( HttpProcessResult_t & tRes, Str_t sSrcQuery, OptionsHash_t & hOptions, CSphVector<BYTE> & dResult, bool bNeedHttpResponse, http_method eRequestType )
{
	// fast path: the result is fine, or the buddy can not / must not be involved
	if ( tRes.m_bOk || !HasBuddy() || tRes.m_eEndpoint==EHTTP_ENDPOINT::INDEX || IsBuddyQuery ( hOptions ) )
	{
		// '/cli' works only via buddy - report why it is unavailable
		if ( tRes.m_eEndpoint==EHTTP_ENDPOINT::CLI )
		{
			if ( !HasBuddy() )
				tRes.m_sError.SetSprintf ( "can not process /cli endpoint without buddy" );
			else if ( IsBuddyQuery ( hOptions ) )
				tRes.m_sError.SetSprintf ( "can not process /cli endpoint with User-Agent:Manticore Buddy" );
			sphHttpErrorReply ( dResult, EHTTP_STATUS::_501, tRes.m_sError.cstr() );
		}

		assert ( dResult.GetLength()>0 );
		return tRes.m_bOk;
	}

	myinfo::SetCommand ( sSrcQuery.first );
	AT_SCOPE_EXIT ( []() { myinfo::SetCommandDone(); } );

	bool bHttpEndpoint = true;
	if ( tRes.m_eEndpoint==EHTTP_ENDPOINT::SQL )
	{
		bHttpEndpoint = false;

		// sql parser put \0 at error position at the reference string
		// should use raw_query for buddy request
		CSphString * pRawQuery = hOptions ( "raw_query" );
		if ( pRawQuery && !pRawQuery->IsEmpty() )
		{
			sSrcQuery = FromStr ( *pRawQuery );
			// need also to skip the head chars "query="
			const char sQueryHead[] = "query=";
			const int iQueryHeadLen = sizeof ( sQueryHead )-1;
			if ( pRawQuery->Begins( sQueryHead ) )
			{
				sSrcQuery.first +=iQueryHeadLen ;
				sSrcQuery.second -= iQueryHeadLen;
			}
		}
	}

	// forward the failed query to the buddy and validate its reply
	auto tReplyRaw = BuddyQuery ( bHttpEndpoint, FromStr ( tRes.m_sError ), FromStr ( hOptions["full_url"] ), sSrcQuery, eRequestType, dResult );
	if ( !tReplyRaw.first )
	{
		sphWarning ( "[BUDDY] [%d] error: %s", session::GetConnID(), tReplyRaw.second.cstr() );
		return tRes.m_bOk;
	}

	CSphString sError;
	BuddyReply_t tReplyParsed;
	if ( !ParseReply ( const_cast<char *>( tReplyRaw.second.cstr() ), tReplyParsed, sError ) )
	{
		sphWarning ( "[BUDDY] [%d] %s: %s", session::GetConnID(), sError.cstr(), tReplyRaw.second.cstr() );
		return tRes.m_bOk;
	}
	if ( ( bHttpEndpoint && tReplyParsed.m_sType!="json response" ) || ( !bHttpEndpoint && tReplyParsed.m_sType!="sql response" ) )
	{
		sphWarning ( "[BUDDY] [%d] wrong response type %s: %s", session::GetConnID(), tReplyParsed.m_sType.cstr(), tReplyRaw.second.cstr() );
		return tRes.m_bOk;
	}

	// the buddy message is either a plain string or a json object to re-serialize
	CSphString sDumpBuf;
	Str_t sDump;
	if ( tReplyParsed.m_tMessage.IsStr() )
	{
		sDump = FromSz ( tReplyParsed.m_tMessage.SzVal() );
	} else
	{
		CSphVector<BYTE> dBson;
		bson::JsonObjToBson ( tReplyParsed.m_tMessage, dBson, true, false );
		bson::Bson_c ( dBson ).BsonToJson ( sDumpBuf, false );
		sDump = FromStr ( sDumpBuf );
	}

	// replace the original (failed) reply with the buddy-produced one
	EHTTP_STATUS eHttpStatus = GetHttpStatusCode ( tReplyParsed.m_iReplyHttpCode, tRes.m_eReplyHttpCode );
	dResult.Resize ( 0 );
	ReplyBuf ( FromStr ( sDump ), eHttpStatus, bNeedHttpResponse, dResult );
	SetSessionMeta ( tReplyParsed.m_tRoot );
	LogBuddyQuery ( sSrcQuery, BuddyQuery_e::HTTP );
	return true;
}
// If the buddy message object carries an "error" field, rewind the output buffer to the saved
// position and emit that error as a regular SQL error packet. Returns false when there is
// nothing to convert (not an object, or no "error" item).
static bool ConvertErrorMessage ( const Str_t & sStmt, std::pair<int, BYTE> tSavedPos, BYTE & uPacketID, const JsonObj_c & tMessage, GenericOutputBuffer_c & tOut )
{
	if ( !tMessage.IsObj() )
		return false;

	CSphString sTmp;
	CSphString sMsgError;
	if ( !tMessage.FetchStrItem ( sMsgError, "error", sTmp, false ) )
		return false;

	// reset back out buff and packet
	uPacketID = tSavedPos.second;
	tOut.Rewind ( tSavedPos.first );
	std::unique_ptr<RowBuffer_i> tBuddyRows ( CreateSqlRowBuffer ( &uPacketID, &tOut ) );

	// log the error and keep it in session meta so SHOW META / client see the real cause
	LogSphinxqlError ( sStmt, FromStr ( sMsgError ) );
	session::GetClientSession()->m_sError = sMsgError;
	session::GetClientSession()->m_tLastMeta.m_sError = sMsgError;
	tBuddyRows->Error ( sMsgError.cstr() );
	return true;
}
// Forward a failed SphinxQL statement to buddy. On success the SQL reply already written to tOut
// is rewound to tSavedPos and replaced with the buddy-produced dataset; on any buddy failure the
// original error is logged and the already-written reply is left untouched.
void ProcessSqlQueryBuddy ( Str_t sSrcQuery, Str_t tError, std::pair<int, BYTE> tSavedPos, BYTE & uPacketID, GenericOutputBuffer_c & tOut )
{
	auto tReplyRaw = BuddyQuery ( false, tError, Str_t(), sSrcQuery, HTTP_GET, VecTraits_T<BYTE>() );
	if ( !tReplyRaw.first )
	{
		LogSphinxqlError ( sSrcQuery.first, tError );
		sphWarning ( "[BUDDY] [%d] error: %s", session::GetConnID(), tReplyRaw.second.cstr() );
		return;
	}

	CSphString sError;
	BuddyReply_t tReplyParsed;
	if ( !ParseReply ( const_cast<char *>( tReplyRaw.second.cstr() ), tReplyParsed, sError ) )
	{
		LogSphinxqlError ( sSrcQuery.first, tError );
		sphWarning ( "[BUDDY] [%d] %s: %s", session::GetConnID(), sError.cstr(), tReplyRaw.second.cstr() );
		return;
	}
	if ( tReplyParsed.m_sType!="sql response" )
	{
		LogSphinxqlError ( sSrcQuery.first, tError );
		sphWarning ( "[BUDDY] [%d] wrong response type %s: %s", session::GetConnID(), tReplyParsed.m_sType.cstr(), tReplyRaw.second.cstr() );
		return;
	}

	// a dataset reply is an array; anything else is either a convertible error object or garbage
	if ( !tReplyParsed.m_tMessage.IsArray() )
	{
		if ( ConvertErrorMessage ( sSrcQuery, tSavedPos, uPacketID, tReplyParsed.m_tMessage, tOut ) )
			return;

		LogSphinxqlError ( sSrcQuery.first, tError );
		sphWarning ( "[BUDDY] [%d] wrong reply format - not cli reply array: %s", session::GetConnID(), tReplyRaw.second.cstr() );
		return;
	}

	// reset back out buff and packet
	uPacketID = tSavedPos.second;
	tOut.Rewind ( tSavedPos.first );
	std::unique_ptr<RowBuffer_i> tBuddyRows ( CreateSqlRowBuffer ( &uPacketID, &tOut ) );

	ConvertJsonDataset ( tReplyParsed.m_tMessage, sSrcQuery.first, *tBuddyRows );
	SetSessionMeta ( tReplyParsed.m_tRoot );
	LogBuddyQuery ( sSrcQuery, BuddyQuery_e::SQL );
}
// default buddy entry point, relative to the modules dir (layout differs per platform)
#ifdef _WIN32
static CSphString g_sDefaultBuddyName ( "manticore-buddy" );
#else
static CSphString g_sDefaultBuddyName ( "manticore-buddy/bin/manticore-buddy" );
#endif
// docker image used to run buddy on Windows; version pinned to the executor the daemon was built against
static CSphString g_sDefaultBuddyDockerImage ( "manticoresearch/manticore-executor:" BUDDY_EXECUTOR_VERNUM );

// Compose the command line that launches buddy.
// On Windows the PHP executor path must be prepended; elsewhere the buddy script is self-executable.
static CSphString GetFullBuddyPath ( const CSphString & sExecPath, const CSphString & sBuddyPath )
{
#ifdef _WIN32
	assert ( !sExecPath.IsEmpty() );
	CSphString sFullPath;
	sFullPath.SetSprintf ( "\"%s\" \"%s\"", sExecPath.cstr(), sBuddyPath.cstr() );
	return sFullPath;
#else
	return sBuddyPath.cstr();
#endif
}
#ifdef _WIN32
// Windows: buddy runs inside a docker container; build the full 'docker run' command line.
// With an explicit buddy_path in the config, that path is used verbatim instead.
CSphString BuddyGetPath ( const CSphString & sConfigPath, const CSphString & , bool bHasBuddyPath, int iHostPort, const CSphString & sDataDir )
{
	if ( bHasBuddyPath )
		return sConfigPath;

	StringBuilder_c sCmd ( " " );
	sCmd.Appendf ( "docker run --rm" ); // the head of the docker start command
	sCmd.Appendf ( "-p %d:9999", iHostPort ); // port mapping
	sCmd.Appendf ( "-v \"%s/%s\":/buddy", GET_MANTICORE_MODULES(), g_sDefaultBuddyName.cstr() ); // volume for buddy modules
	sCmd.Appendf ( "-v manticore-usr_local_lib_manticore:/usr/local/lib/manticore -e PLUGIN_DIR=/usr/local/lib/manticore" ); // pesistent volume for buddy data
	if ( !sDataDir.IsEmpty() ) // volume for data dir into container
		sCmd.Appendf ( "-v \"%s\":/var/lib/manticore -e DATA_DIR=/var/lib/manticore", sDataDir.cstr() );
	sCmd.Appendf ( "-w /buddy" ); // workdir is buddy root dir
	sCmd.Appendf ( "--name %s", g_sContainerName.cstr() ); // the name of the buddy container is the hash of the config
	sCmd.Appendf ( "%s /buddy/src/main.php", g_sDefaultBuddyDockerImage.cstr() ); // docker image and the buddy start command
	return CSphString ( sCmd );
}
#else
// Non-Windows: probe known locations for the buddy binary; empty string disables buddy.
// With an explicit buddy_path in the config, that path is used verbatim instead.
CSphString BuddyGetPath ( const CSphString & sConfigPath, const CSphString & sPluginDir, bool bHasBuddyPath, int iHostPort, const CSphString & )
{
	if ( bHasBuddyPath )
		return sConfigPath;

	CSphString sExecPath; // stays empty; unused by GetFullBuddyPath on this platform
	CSphString sPathToDaemon = GetPathOnly ( GetExecutablePath() );

	// check the modules dir first
	CSphString sPathBuddy2Module;
	sPathBuddy2Module.SetSprintf ( "%s/%s", GET_MANTICORE_MODULES(), g_sDefaultBuddyName.cstr() );
	if ( sphFileExists ( sPathBuddy2Module.cstr() ) )
		return GetFullBuddyPath ( sExecPath, sPathBuddy2Module );

	// check at the daemon location / cwd
	CSphString sPathBuddy2Cwd;
	sPathBuddy2Cwd.SetSprintf ( "%s%s", sPathToDaemon.cstr(), g_sDefaultBuddyName.cstr() );
	if ( sphFileExists ( sPathBuddy2Cwd.cstr() ) )
		return GetFullBuddyPath ( sExecPath, sPathBuddy2Cwd );

	sphWarning ( "[BUDDY] no %s found at '%s', disabled", g_sDefaultBuddyName.cstr(), sPathBuddy2Module.cstr() );
	return CSphString();
}
#endif
| 28,043
|
C++
|
.cpp
| 783
| 33.464879
| 245
| 0.709489
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,957
|
networking_daemon.cpp
|
manticoresoftware_manticoresearch/src/networking_daemon.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "networking_daemon.h"
#include "loop_profiler.h"
#include "net_action_accept.h"
#include "coroutine.h"
#include "tracer.h"
#if _WIN32
// Win-specific headers and calls
#include <io.h>
#else
// UNIX-specific headers and calls
#include <sys/wait.h>
#include <netdb.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#endif
using namespace Threads;
int64_t g_tmWaitUS = -1;
int g_iThrottleAction = 0;
/////////////////////////////////////////////////////////////////////////////
/// CSphWakeupEvent - used to kick poller from outside
/////////////////////////////////////////////////////////////////////////////
// event that wakes-up poll net loop from finished thread pool job
// Pollable event used to kick the net poller loop from another thread.
CSphWakeupEvent::CSphWakeupEvent ()
	: PollableEvent_t()
	, ISphNetAction ( m_iPollablefd )
{
	m_uIOChange = NetPollEvent_t::SET_READ; // we only ever read the wakeup byte
}

CSphWakeupEvent::~CSphWakeupEvent ()
{
	Close ();
	m_iSock = -1;
}

// Signal the poller; failure to fire is non-fatal and only logged.
void CSphWakeupEvent::Wakeup ()
{
	if ( !IsPollable() )
		return;

	if ( FireEvent () )
		return;
	int iErrno = PollableErrno ();
	sphLogDebugv ( "failed to wakeup net thread ( error %d,'%s')", iErrno, strerrorm ( iErrno ) );
}

// Consume the wakeup byte when the poller reports the fd readable.
void CSphWakeupEvent::Process ()
{
	if ( m_uGotEvents & NetPollEvent_t::IS_READ )
		DisposeEvent();
}

// Nothing to tear down when the net loop dies; the event owns only its fds.
void CSphWakeupEvent::NetLoopDestroying()
{}
// Current phase of the net-loop thread, shown in 'show threads'.
enum class NetloopState_e : BYTE
{
	UNKNOWN,
	DESTRUCTING,
	PROCESS_READY,
	PROCESS_NEW,
	REMOVE_OUTDATED,
	POLL_IDLE,
};

// Human-readable name of a net-loop state; any unexpected value maps to "unknown".
static const char * NetloopStateName ( NetloopState_e eState )
{
	const char * szName = "unknown";
	switch ( eState )
	{
	case NetloopState_e::UNKNOWN:			szName = "-"; break;
	case NetloopState_e::DESTRUCTING:		szName = "finishing"; break;
	case NetloopState_e::PROCESS_READY:		szName = "process ready"; break;
	case NetloopState_e::PROCESS_NEW:		szName = "process new"; break;
	case NetloopState_e::REMOVE_OUTDATED:	szName = "remove outdated"; break;
	case NetloopState_e::POLL_IDLE:			szName = "in polling"; break;
	}
	return szName;
}
// display incoming string as client name in show threads
// Render a ListenTaskInfo_t for 'show threads': chain tag plus tick/works/state description.
DEFINE_RENDER( ListenTaskInfo_t )
{
	auto & tInfo = *(ListenTaskInfo_t *) const_cast<void*>(pSrc);
	dDst.m_sChain << "Listen ";
	dDst.m_sDescription << "tick: " << tInfo.m_uTick << " works: " << tInfo.m_uWorks << " state: " << NetloopStateName ( tInfo.m_eThdState );
}
/////////////////////////////////////////////////////////////////////////////
/// CSphNetLoop - main poller. Used for serving accepts and all socket operations
/////////////////////////////////////////////////////////////////////////////
// Private implementation of CSphNetLoop. Owns the poller, the wakeup event, and two work queues:
// m_dWorkExternal is filled by other threads (under spinlock), m_dWorkInternal is the poller
// thread's private copy. Thread-safety is annotated via the NetPoollingThread capability.
class CSphNetLoop::Impl_c
{
	// since it is impl, everything is private and accessible by friendship
	friend class CSphNetLoop;

	CSphVector<ISphNetAction *> m_dWorkInternal GUARDED_BY ( NetPoollingThread );
	CSphVector<ISphNetAction *> m_dWorkExternal GUARDED_BY ( m_tExtLock );
	sph::Spinlock_c m_tExtLock; // very short-term, so spinlock is ok.
	WakeupEventRefPtr_c m_pWakeup;
	LoopProfiler_t m_tPrf;
	std::unique_ptr<NetPooller_c> m_pPoll;
	CSphAutoEvent m_tWorkerFinished;	// signalled once when LoopNetPoll exits
	volatile bool m_bWorkerFinished = false;

public:
	Impl_c ()
		: m_pPoll { std::make_unique<NetPooller_c> ( 1000, g_iThrottleAction )}
	{
		// pollable wakeup lets other threads kick us out of Wait(); without it we fall back to 1ms polling
		m_pWakeup = new CSphWakeupEvent;
		if ( m_pWakeup->IsPollable() )
		{
			sphLogDebugvv ( "Setup wakeup as %d, %d", m_pWakeup->m_iSock, (int) m_pWakeup->m_iTimeoutTimeUS );
			m_pPoll->SetupEvent ( m_pWakeup );
		} else
			sphWarning ( "net-loop use timeout due to %s", m_pWakeup->m_sError.cstr () );

		m_dWorkExternal.Reserve ( 1000 );
		m_dWorkInternal.Reserve ( 1000 );
	}

	// Register accept actions for the given listeners; they live until NetLoopDestroying.
	void SetListeners ( const VecTraits_T<Listener_t>& dListeners, CSphNetLoop* pParent ) REQUIRES ( NetPoollingThread )
	{
		for ( const auto& dListener : dListeners )
		{
			NetPoolEventRefPtr_c pCur { new NetActionAccept_c ( dListener, pParent ) };
			sphLogDebugvv ( "setup listener as %d, %d", pCur->m_iSock, (int)pCur->m_iTimeoutTimeUS );
			m_pPoll->SetupEvent ( pCur.Leak() ); // will be released when netloop finishes, in NetLoopDestroying call
		}
	}

private:
	// Task-info record of the net-loop thread (asserted non-null in debug builds).
	static inline ListenTaskInfo_t* pMyInfo()
	{
#ifndef NDEBUG
		auto pRes = myinfo::ref<ListenTaskInfo_t>();
		assert (pRes);
		return pRes;
#else
		return myinfo::ref<ListenTaskInfo_t>();
#endif
	}

	// Collect every event still registered in the poller, remove it, and notify it that the
	// loop is going away. Called only after the poller thread has stopped (see StopNetLoop).
	void TerminateSessions() REQUIRES ( NetPoollingThread ) NO_THREAD_SAFETY_ANALYSIS
	{
		sphLogDebugv ( "TerminateSessions() (%p) invoked", this );
		assert ( m_dWorkInternal.IsEmpty () );
		m_pPoll->ProcessAll( [this] ( NetPollEvent_t * pWork ) NO_THREAD_SAFETY_ANALYSIS
		{
			SafeAddRef ( pWork );
			m_dWorkExternal.Add ( (ISphNetAction*) pWork );
		});

		m_dWorkExternal.Uniq();
		for ( auto* pWork : m_dWorkExternal )
		{
			// deal with closed sockets which lives exclusively in m_pPoll (and so, would be removed immediately on RemoveEvent() )
			CSphRefcountedPtr<ISphNetAction> pWorkKeeper { pWork };
			m_pPoll->RemoveEvent ( pWork );
			pWork->NetLoopDestroying();
		}
	}

	// add actions planned by jobs
	void PickNewActions () REQUIRES ( NetPoollingThread )
	{
		m_tPrf.StartExt ();
		sph::Spinlock_lock tExtLock { m_tExtLock };
		auto iExtLen = m_dWorkExternal.GetLength();
		m_tPrf.m_iPerfExt = iExtLen;
		pMyInfo ()->m_uWorks = iExtLen;
		if ( iExtLen )
		{
			assert ( m_dWorkInternal.IsEmpty ());
			m_dWorkInternal.SwapData ( m_dWorkExternal ); // O(1) queue handover under the spinlock
		}
		m_tPrf.EndTask ();
	}

	// Move externally-queued actions into the poller.
	void EnqueueNewActions() REQUIRES ( NetPoollingThread )
	{
		// add actions planned by jobs
		PickNewActions();

		pMyInfo()->m_uWorks = m_dWorkInternal.GetLength();

		m_tPrf.StartNext();
		m_dWorkInternal.for_each ( [&] ( ISphNetAction* pWork ) REQUIRES ( NetPoollingThread ) {
			assert ( pWork );
			m_pPoll->SetupEvent ( pWork );
			pWork->Release(); // ref taken in AddAction; the poller holds its own
		} );
		m_dWorkInternal.Resize ( 0 );
		m_tPrf.EndTask();
	}

	// Process all events the last Wait() reported ready; returns how many were handled.
	int ProcessReady () REQUIRES ( NetPoollingThread )
	{
		int iProcessedEvents = 0;
		for ( NetPollEvent_t & dReady : *m_pPoll )
		{
			m_tPrf.StartAt ();
			assert ( dReady.m_uGotEvents );
			auto pWork = (ISphNetAction *) &dReady;
			m_pPoll->RemoveTimeout ( pWork ); // ensure that timer (if any) will no more fire
			pWork->Process ();
			++m_tPrf.m_iPerfEv;
			++iProcessedEvents;
			m_tPrf.EndTask ();
		}
		return iProcessedEvents;
	}

	void Poll ( int64_t tmLastWaitUS ) REQUIRES ( NetPoollingThread )
	{
		// lets spin net-loop thread without syscall\sleep\wait up to net_wait period
		// in case we got events recently or call job that might finish early
		// otherwise poll ( 1 ) \ epoll_wait ( 1 ) put off this thread and introduce some latency, ie
		// sysbench test with 1 thd and 3 empty indexes reports:
		// 3k qps for net-loop without spin-wait
		// 5k qps for net-loop with spin-wait
		int64_t iWaitUS = 0LL;
		if ( g_tmWaitUS < 0 || ( MonoMicroTimer() - tmLastWaitUS > g_tmWaitUS ) )
			iWaitUS = m_pWakeup ? WAIT_UNTIL_TIMEOUT : 1000LL;

		m_tPrf.StartPoll ();
		// need positive timeout for communicate threads back and shutdown
		Threads::IdleTimer_t _;
		pMyInfo ()->m_eThdState = NetloopState_e::POLL_IDLE;
		m_pPoll->Wait ( iWaitUS );
		m_tPrf.EndTask ();
	}

	// Main poller loop: poll -> process ready -> enqueue new -> drop timed-out, until shutdown.
	void LoopNetPoll () REQUIRES ( NetPoollingThread )
	{
		auto _ = PublishTaskInfo ( new ListenTaskInfo_t );
		int64_t tmLastWaitUS = MonoMicroTimer();
		while ( !sphInterrupted() )
		{
			m_tPrf.Start();

			Poll ( tmLastWaitUS );
			pMyInfo ()->m_eThdState = NetloopState_e::PROCESS_READY;
			++pMyInfo ()->m_uTick;

			// handle events and collect stats
			m_tPrf.StartTick();
			sphLogDebugv ( "got events=%d, tick=%u, interrupted=%d", m_pPoll->GetNumOfReady (), pMyInfo ()->m_uTick, !!sphInterrupted () );
			auto iProcessed = ProcessReady();
			m_tPrf.EndTask();

			pMyInfo ()->m_eThdState = NetloopState_e::PROCESS_NEW;

			// setup or refresh handlers
			EnqueueNewActions();

			// will remove outdated even if they're just added (to avoid polling them)
			iProcessed += RemoveOutdated ();

			if ( iProcessed )
				tmLastWaitUS = MonoMicroTimer(); // activity seen - keep spin-waiting for a while
			m_tPrf.End();
		}
		m_bWorkerFinished = true;
		m_tWorkerFinished.SetEvent ();
	}

	// True when the event either has no timeout, or its deadline is not reached yet.
	bool IsInTime ( NetPollEvent_t* pEvent, int64_t tmNowUS ) const
	{
		return pEvent->m_iTimeoutIdx < 0
			|| pEvent->m_iTimeoutTimeUS <= 0
			|| !sph::TimeExceeded ( pEvent->m_iTimeoutTimeUS, tmNowUS, m_pPoll->TickGranularity() );
	}

	// Fire IS_TIMEOUT processing on every event whose deadline passed; returns the count.
	int RemoveOutdated () REQUIRES ( NetPoollingThread )
	{
		pMyInfo ()->m_eThdState = NetloopState_e::REMOVE_OUTDATED;
		int64_t tmNowUS = MonoMicroTimer();

		m_tPrf.StartRemove();
		int iRemoved = 0;
		// remove outdated items on no signals
		m_pPoll->ProcessAll([&] ( NetPollEvent_t * pEvent ) REQUIRES ( NetPoollingThread )
		{
			// skip eternal (non-timeouted)
			if ( IsInTime ( pEvent, tmNowUS ) )
				return;

			sphLogDebugv ( "%p bailing on timeout no signal, sock=%d", pEvent, pEvent->m_iSock );
			pEvent->m_uGotEvents = NetPollEvent_t::IS_TIMEOUT;
			auto* pWork = (ISphNetAction*)pEvent;
			pWork->Process();
			++iRemoved;
		});
		m_tPrf.EndTask();
		return iRemoved;
	}

	// Wake the poller (no-op when there is no pollable wakeup event).
	void Kick ()
	{
		sphLogDebugvv ( "Kick" );
		if ( m_pWakeup )
			m_pWakeup->Wakeup ();
	}

	void StopNetLoop () // doesn't require NetPoollingThread
	{
		sphLogDebugv ( "StopNetLoop()" );
		Kick ();
		m_tWorkerFinished.WaitEvent ();

		// it is safe to call terminations here, since netpool is stopped. So, declare that we're 'netpool' now.
		ScopedRole_c thPoll ( NetPoollingThread );
		TerminateSessions();
		sphLogDebugv ( "StopNetLoop() succeeded" );
	}

	// Queue an action from any thread; returns false once the worker has finished.
	bool AddAction ( ISphNetAction * pElem ) EXCLUDES ( NetPoollingThread )
	{
		sphLogDebugvv ( "AddAction action as %d, events %u, timeout %d", pElem->m_iSock, pElem->m_uIOChange, (int) pElem->m_iTimeoutTimeUS );
		if ( m_bWorkerFinished )
			return false;
		{
			sph::Spinlock_lock tExtLock { m_tExtLock };
			SafeAddRef ( pElem );
			m_dWorkExternal.Add ( pElem );
		}
		Kick();
		return true;
	}

	void RemoveEvent ( NetPollEvent_t * pEvent ) REQUIRES ( NetPoollingThread )
	{
		sphLogDebugv ( "RemoveEvent()" );
		m_pPoll->RemoveEvent ( pEvent );
	}
};
/////////////////////////////////////////////////////////////////////////////
// CSphNetLoop is a thin pimpl facade; all logic lives in Impl_c above.
CSphNetLoop::CSphNetLoop ()
	: m_pImpl { std::make_unique<Impl_c> () }
{}

void CSphNetLoop::SetListeners ( const VecTraits_T<Listener_t>& dListeners )
{
	// we're not polling yet, so it is safe to claim the poller role for setup
	ScopedRole_c thPoll ( NetPoollingThread );
	m_pImpl->SetListeners ( dListeners, this );
}

CSphNetLoop::~CSphNetLoop ()
{
	sphLogDebugv ( "~CSphNetLoop() (%p) completed", this );
}

void CSphNetLoop::LoopNetPoll ()
{
	m_pImpl->LoopNetPoll();
}
// Stop the poller thread and terminate all registered sessions (blocks until done).
void CSphNetLoop::StopNetLoop()
{
	m_pImpl->StopNetLoop ();
} // fixed: dropped the stray ';' that followed the function body
// Queue an action from any thread; false once the loop has finished.
bool CSphNetLoop::AddAction ( ISphNetAction * pElem ) EXCLUDES ( NetPoollingThread )
{
	return m_pImpl->AddAction ( pElem );
}

void CSphNetLoop::RemoveEvent ( NetPollEvent_t * pEvent ) REQUIRES ( NetPoollingThread )
{
	m_pImpl->RemoveEvent ( pEvent );
}

// process-wide 'current' net loop, published once at startup (no locking visible here)
namespace {
	CSphNetLoop* g_pNetLoop = nullptr;
}

CSphNetLoop* GetAvailableNetLoop()
{
	return g_pNetLoop;
}

void SetAvailableNetLoop ( CSphNetLoop* pNetLoop )
{
	g_pNetLoop = pNetLoop;
}
/////////////////////////////////////////////////////////////////////////////
/// SockWrapper_c::Impl_c internal async socket implementation
/////////////////////////////////////////////////////////////////////////////
// Async socket backend of SockWrapper_c. Lives as a refcounted net action: blocking waits are
// implemented by suspending the current coroutine (m_tWaker) and registering this object in the
// net loop; Process()/NetLoopDestroying() resume it from the poller side.
class SockWrapper_c::Impl_c final : public ISphNetAction
{
	friend class SockWrapper_c;

	CSphRefcountedPtr<CSphNetLoop> m_pNetLoop;		// null means 'use classic blocking poll'
	Threads::Coro::AtomicWaker_c m_tWaker;			// resumes the suspended coroutine
	int64_t	m_iWriteTimeoutUS;
	int64_t m_iReadTimeoutUS;

	int64_t	m_iTotalSent = 0;
	int64_t	m_iTotalReceived = 0;

	Impl_c ( int iSocket, CSphNetLoop * pNetLoop );
	~Impl_c () final;

	int64_t SockRecv ( char * pBuf, int64_t iLeftBytes );
	int64_t SockSend ( const char * pBuf, int64_t iLeftBytes );

	int64_t GetTimeoutUS () const;
	void SetTimeoutUS ( int64_t iTimeoutUS );

	int64_t GetWTimeoutUS () const;
	void SetWTimeoutUS ( int64_t iTimeoutUS );

	void EngageWaiterAndYield( int64_t tmTimeUntilUs );

	int SockPoll ( int64_t tmTimeUntilUs, bool bWrite );
	int SockPollClassic ( int64_t tmTimeUntilUs, bool bWrite );
	int SockPollNetloop ( int64_t tmTimeUntilUs, bool bWrite );

	int64_t GetTotalSent () const;
	int64_t GetTotalReceived () const;

	void ParentDestroying();

	// resume the waiting coroutine ('vip' scheduling)
	inline void Wake()
	{
		m_tWaker.WakeOnce ( true ); // true means 'vip'
	}

	// force 'timed out' as the awake reason, then resume
	inline void FinallyAbort()
	{
		m_uGotEvents = NetPollEvent_t::IS_TIMEOUT;
		Wake();
	}

public:
	void Process () REQUIRES ( NetPoollingThread ) final;
	void NetLoopDestroying () REQUIRES ( NetPoollingThread ) final;
};
SockWrapper_c::Impl_c::Impl_c ( int iSocket, CSphNetLoop * pNetLoop )
	: ISphNetAction ( iSocket )
	, m_pNetLoop ( pNetLoop )
{
	// default read/write timeouts come from the global settings (seconds -> microseconds)
	SetWTimeoutUS ( g_iWriteTimeoutS * S2US );
	SetTimeoutUS ( g_iReadTimeoutS * S2US );
	SafeAddRef ( pNetLoop );
}

// Called from ~SockWrapper_c: close the socket and, if we are still registered in the net loop,
// queue a SET_CLOSED action so the poller drops us cleanly.
void SockWrapper_c::Impl_c::ParentDestroying ()
{
	if ( m_iSock>=0 )
	{
		sphLogDebugv ( "Destroying and closing sock=%d", m_iSock );
		sphSockClose ( m_iSock );

		if ( IsLinked () && m_pNetLoop )
		{
			m_uIOChange = NetPollEvent_t::SET_CLOSED;
			m_pNetLoop->AddAction ( this );
		}
	}
}

SockWrapper_c::Impl_c::~Impl_c ()
{
	sphLogDebugv ( "SockWrapper_c::Impl_c::~Impl_c (); sent " INT64_FMT ", received " INT64_FMT, m_iTotalSent, m_iTotalReceived);
}

// Netpool is already stopped, so it is th-safe here.
void SockWrapper_c::Impl_c::NetLoopDestroying () REQUIRES ( NetPoollingThread )
{
	sphLogDebugv ( "SockWrapper_c::Impl_c::NetLoopDestroying ()" );

	// if we're not finished - setting m_pNetLoop to null will just switch us to classic blocking polling.
	m_pNetLoop = nullptr;
	sphLogDebugv ( "SockWrapper_c::Impl_c::NetLoopDestroying () will resume sleeping job" );

	// if we're in state of waiting - forcibly set awake reason to 'timeout', then wake up.
	FinallyAbort();
}

// this is blocking function. Aware, that current thread may change when it finished.
void SockWrapper_c::Impl_c::EngageWaiterAndYield ( int64_t tmTimeUntilUs )
{
	assert ( m_pNetLoop );
	sphLogDebugv ( "Coro::YieldWith (m_iEvent=%u), timeout %d", m_uGotEvents, int(tmTimeUntilUs-MonoMicroTimer ()) );
	m_iTimeoutTimeUS = tmTimeUntilUs;

	// switch context (go to poll)
	Threads::Coro::YieldWith ( [this, pWorker = Coro::CurrentWorker()] {
		m_tWaker.Assign ( Threads::CreateWaker ( pWorker ) );
		if ( !m_pNetLoop->AddAction ( this ) ) // can fail if backend netpool is already finished
			FinallyAbort();
	});

	// here we switched back by call m_tWaker.Wake().
	sphLogDebugv ( "EngageWaiterAndYield awake (m_iSock=%d, events=%u)", m_iSock, m_uGotEvents );
}

// Called in strict order after EngageWaiterAndYield.
// timer is removed and will NOT tick anyway in the future.
// event itself is deactivated (for socket it is one-shot), or timed-out (need to be removed)
// If it was called >once - search for the problem in caller place.
void SockWrapper_c::Impl_c::Process () REQUIRES ( NetPoollingThread )
{
	if ( CheckSocketError() || m_uGotEvents == IS_TIMEOUT ) // real socket error
		m_pNetLoop->RemoveEvent ( this );
	Wake();
}
// classic version - blocking via sphPoll
// classic version - blocking via sphPoll
int SockWrapper_c::Impl_c::SockPollClassic ( int64_t tmTimeUntilUs, bool bWrite )
{
	// deadline is absolute; convert to 'time left' for sphPoll, clamped at zero
	int64_t tmMicroLeft = ( tmTimeUntilUs - MonoMicroTimer() );
	if ( tmMicroLeft<0 )
		tmMicroLeft = 0;

	Threads::IdleTimer_t _;
	int iRes = sphPoll ( m_iSock, tmMicroLeft, bWrite );
	sphLogDebugv ( "sphPoll for alone returned %d in " INT64_FMT " Us", iRes, tmMicroLeft - tmTimeUntilUs + MonoMicroTimer() );
	return iRes;
}

// netloop version - yield rescheduling and yield
// Command flow:
// EngageWaiterAndYield stores current context into continuation, then suspend it and call AddAction to setup polling.
// Net polling thread then register our socket in the poll/epoll/kqueue and poll it.
// when an event fired, or timeout happened, it calls 'process', which, in turn,
// schedules our continuation. So, we returned back from EngageWaiter (most probably already in another thread), and
// process events.
int SockWrapper_c::Impl_c::SockPollNetloop ( int64_t tmTimeUntilUs, bool bWrite )
{
	// one-shot edge-triggered subscription for the requested direction
	m_uIOChange = NetPollEvent_t::SET_EDGEONESHOT | ( bWrite ? NetPollEvent_t::SET_WRITE : NetPollEvent_t::SET_READ );
	EngageWaiterAndYield ( tmTimeUntilUs );
	if ( m_uGotEvents == NetPollEvent_t::IS_TIMEOUT )
	{
		sphSockSetErrno ( ETIMEDOUT );
		return 0;
	}
	return CheckSocketError () ? -1 : 1;
}

// as usual sphPoll - returns 1 on success, 0 on timeout, -1 on error.
int SockWrapper_c::Impl_c::SockPoll ( int64_t tmTimeUntilUs, bool bWrite )
{
	TRACE_CONN ( "conn", "SockPoll" );
	// report NET_IDLE while waiting; restore the actual direction on exit
	session::SetTaskState ( TaskState_e::NET_IDLE );
	AT_SCOPE_EXIT ( [bWrite] { session::SetTaskState ( bWrite ? TaskState_e::NET_WRITE : TaskState_e::NET_READ ); } );
	return m_pNetLoop ? SockPollNetloop ( tmTimeUntilUs, bWrite ) : SockPollClassic ( tmTimeUntilUs, bWrite );
}
// Raw send; accumulates per-connection traffic counter on success.
int64_t SockWrapper_c::Impl_c::SockSend ( const char * pBuf, int64_t iLeftBytes )
{
	auto iRes = sphSockSend ( m_iSock, pBuf, iLeftBytes );
	if ( iRes>0 )
		m_iTotalSent += iRes;
	return iRes;
}

// Raw receive; accumulates per-connection traffic counter on success.
int64_t SockWrapper_c::Impl_c::SockRecv ( char * pBuf, int64_t iLeftBytes )
{
	sphLogDebugvv ( "SockRecv %d, for " INT64_FMT " bytes", m_iSock, iLeftBytes );
	auto iRes = sphSockRecv ( m_iSock, pBuf, iLeftBytes );
	if ( iRes>0 )
		m_iTotalReceived += iRes;
	return iRes;
}

// plain accessors for the read/write timeouts and traffic totals

int64_t SockWrapper_c::Impl_c::GetTimeoutUS () const
{
	return m_iReadTimeoutUS;
}

void SockWrapper_c::Impl_c::SetTimeoutUS ( int64_t iTimeoutUS )
{
	m_iReadTimeoutUS = iTimeoutUS;
}

int64_t SockWrapper_c::Impl_c::GetWTimeoutUS () const
{
	return m_iWriteTimeoutUS;
}

void SockWrapper_c::Impl_c::SetWTimeoutUS ( int64_t iTimeoutUS )
{
	m_iWriteTimeoutUS = iTimeoutUS;
}

int64_t SockWrapper_c::Impl_c::GetTotalSent() const
{
	return m_iTotalSent;
}

int64_t SockWrapper_c::Impl_c::GetTotalReceived () const
{
	return m_iTotalReceived;
}
/////////////////////////////////////////////////////////////////////////////
/// SockWrapper_c frontend implementation
/////////////////////////////////////////////////////////////////////////////
// SockWrapper_c frontend: thin forwarders into the refcounted Impl_c.
SockWrapper_c::SockWrapper_c ( int iSocket, CSphNetLoop * pNetLoop )
	: m_pImpl { new Impl_c ( iSocket, pNetLoop ) }
{}

SockWrapper_c::~SockWrapper_c ()
{
	assert ( m_pImpl );
	// close the socket / detach from the net loop, then drop our reference
	// (impl may outlive us until the poller releases it)
	m_pImpl->ParentDestroying();
	SafeRelease ( m_pImpl );
}

int64_t SockWrapper_c::SockSend ( const char * pData, int64_t iLen )
{
	return m_pImpl->SockSend ( pData, iLen );
}

int64_t SockWrapper_c::SockRecv ( char * pData, int64_t iLen )
{
	assert ( m_pImpl );
	return m_pImpl->SockRecv ( pData, iLen );
}

int SockWrapper_c::SockPoll ( int64_t tmTimeUntilUs, bool bWrite )
{
	assert ( m_pImpl );
	return m_pImpl->SockPoll ( tmTimeUntilUs, bWrite );
}

int64_t SockWrapper_c::GetTimeoutUS () const
{
	assert ( m_pImpl );
	return m_pImpl->GetTimeoutUS();
}

void SockWrapper_c::SetTimeoutUS ( int64_t iTimeoutUS )
{
	assert ( m_pImpl );
	m_pImpl->SetTimeoutUS (iTimeoutUS);
}

int64_t SockWrapper_c::GetWTimeoutUS () const
{
	assert ( m_pImpl );
	return m_pImpl->GetWTimeoutUS ();
}

void SockWrapper_c::SetWTimeoutUS ( int64_t iTimeoutUS )
{
	assert ( m_pImpl );
	m_pImpl->SetWTimeoutUS ( iTimeoutUS );
}

int64_t SockWrapper_c::GetTotalSent() const
{
	assert ( m_pImpl );
	return m_pImpl->GetTotalSent();
}

int64_t SockWrapper_c::GetTotalReceived () const
{
	assert ( m_pImpl );
	return m_pImpl->GetTotalReceived ();
}

int SockWrapper_c::GetSocket () const
{
	assert ( m_pImpl );
	return m_pImpl->m_iSock;
}
/////////////////////////////////////////////////////////////////////////////
/// Helpers
/////////////////////////////////////////////////////////////////////////////
// Send a blob into socket.
// Alone worker will use waiting in poll.
// Cooperative worker will yield and resume instead of waiting.
// Send a blob into socket.
// Alone worker will use waiting in poll.
// Cooperative worker will yield and resume instead of waiting.
// Returns true when everything was sent; on error/timeout sets sError and returns false.
static bool SyncSend ( SockWrapper_c* pSock, const char * pBuffer, int64_t iLen, CSphString & sError )
{
	if ( sphInterrupted () )
		sphLogDebugv ( "SIGTERM in SockWrapper_c::Send" );

	if ( iLen<=0 )
	{
		sError = "empty input buffer";
		return false;
	}

	sphLogDebugv ( "AsyncSend " INT64_FMT " bytes, sock=%d", iLen, pSock->GetSocket() );

	int64_t iMaxTimerPeriodUS = pSock->GetWTimeoutUS();
	int64_t iLastTimestamp = MonoMicroTimer();
	auto iTimeoutUntilUs = iLastTimestamp + iMaxTimerPeriodUS; // in microseconds
	for (;;)
	{
		// inner loop: write as much as possible, poll for writability between attempts
		do
		{
			auto iRes = pSock->SockSend ( pBuffer, iLen );
			if ( iRes<0 )
			{
				int iErrno = sphSockGetErrno ();
				if ( iErrno==EINTR ) // interrupted before any data was sent; just loop
					continue;
				if ( iErrno!=EAGAIN && iErrno!=EWOULDBLOCK )
				{
					sError.SetSprintf ( "send() failed: %d: %s", iErrno, sphSockError ( iErrno ) );
					sphWarning ( "%s, sock=%d", sError.cstr(), pSock->GetSocket() );
					return false;
				}
			} else
			{
				if ( iLen==iRes )
					return true; // we're finished
				iLen -= iRes;
				pBuffer += iRes;
			}
			iLastTimestamp = MonoMicroTimer(); // remember time of last successful activity
			sphLogDebugv ("Still need to send " INT64_FMT " bytes, sock=%d", iLen, pSock->GetSocket() );
		} while ( pSock->SockPoll ( iTimeoutUntilUs, true ) );

		// poll timed out. Unless each-packet-timeout mode is on, extend the deadline
		// from the last activity timestamp and retry once more.
		if ( !g_bTimeoutEachPacket )
		{
			auto iTimeoutFromLastActivity = iLastTimestamp + iMaxTimerPeriodUS;
			if ( iTimeoutFromLastActivity > MonoMicroTimer() )
			{
				sphWarning ( "sync-send action for more %d", (int)( iTimeoutFromLastActivity - MonoMicroTimer() ) );
				iTimeoutUntilUs = iTimeoutFromLastActivity;
				continue;
			}
		}
		break;
	}
	sError = "timed out while performing SyncSend to flush network buffers";
	sphWarning ( "%s, sock=%d", sError.cstr(), pSock->GetSocket() );
	return false;
}
// fetch a chunk of bytes from socket and adjust position/rest of bytes
// Fetch one chunk of bytes from the socket; on success advance pBuf and shrink iLeftBytes
// by the amount read. Returns the recv() result (bytes read, 0 on EOF, negative on error).
static int AsyncRecvNBChunk ( SockWrapper_c * pSock, BYTE *& pBuf, int & iLeftBytes )
{
	// pull whatever the socket has right now
	int64_t iGot = pSock->SockRecv ( (char*) pBuf, iLeftBytes );
	sphLogDebugv ( "AsyncRecvNBChunk " INT64_FMT " when read %d bytes, sock=%d", iGot, iLeftBytes, pSock->GetSocket() );
	if ( iGot<=0 )
		return (int) iGot;

	pBuf += iGot;
	iLeftBytes -= (int) iGot;
	return (int) iGot;
}
#if _WIN32
#define EMULATE_EINTR 1
#endif
//#define EMULATE_EINTR 1
// flexible receive data from socket. iLen indicates, how many bytes to read. iSpace - how many is _safe_ to read.
// (i.e. if you want 1 byte and space for 100 - you can read up to 100 bytes, but not 101).
// flexible receive data from socket. iLen indicates, how many bytes to read. iSpace - how many is _safe_ to read.
// (i.e. if you want 1 byte and space for 100 - you can read up to 100 bytes, but not 101).
// Returns total bytes received (>=iLen), or -1 with errno set (ETIMEDOUT/ECONNRESET/EINTR/...).
// bIntr permits SIGTERM to interrupt the *first* read only; it is dropped afterwards to avoid
// losing a partially-filled buffer.
static int SyncSockRead ( SockWrapper_c * pSock, BYTE* pBuf, int iLen, int iSpace, bool bIntr )
{
	assert ( iSpace>=iLen );

	// try to receive available chunk
	int iReceived = AsyncRecvNBChunk ( pSock, pBuf, iSpace );
	sphLogDebugv ( "AsyncRecvNBChunk %d bytes (%d requested), sock=%d", iReceived, iLen, pSock->GetSocket() );

	if ( iReceived>=iLen ) // all, and m.b. more read in one-shot
		return iReceived;

	// immediate error (most probably it is E_AGAIN; check!)
	if ( iReceived<0 )
		iReceived = 0;

	iLen -= iReceived;

	if ( !iLen )
		return iReceived;

	int64_t iMaxTimerPeriodUS = Max ( S2US, pSock->GetTimeoutUS() );
	int64_t tmMaxTimer = MonoMicroTimer() + iMaxTimerPeriodUS; // in microseconds

	int iErr, iRes;
	while ( iLen>0 )
	{
		int64_t tmNextStopUs = tmMaxTimer;
		int64_t iLastTimestamp = MonoMicroTimer();

#if EMULATE_EINTR
		// Windows EINTR emulation
		// Ctrl-C will not interrupt select on Windows, so let's handle that manually
		// forcibly limit select() to 100 ms, and check flag afterwards
		if ( bIntr )
			tmNextStopUs = Min ( tmMaxTimer, iLastTimestamp + 100000 );
#endif

		if ( ( tmNextStopUs - iLastTimestamp )<=0 )
			break; // timed out

		// wait until there is data
		sphLogDebugv ( "Still need to receive %d bytes, sock=%d", iLen, pSock->GetSocket() );
		iRes = pSock->SockPoll ( tmNextStopUs, false );

		// if there was EINTR, retry
		// if any other error, bail
		if ( iRes==-1 )
		{
			// only let SIGTERM (of all them) to interrupt, and only if explicitly allowed
			iErr = sphSockGetErrno();
			if ( iErr==EINTR )
			{
				if ( !( sphInterrupted () && bIntr ))
					continue;
				sphLogDebugv( "SyncSockRead: select got SIGTERM, exit -1, sock=%d", pSock->GetSocket() );
			}
			return -1;
		}

		// if there was a timeout, report it as an error
		if ( iRes==0 )
		{
#if EMULATE_EINTR
			// EINTR emulation
			if ( bIntr )
			{
				// got that SIGTERM
				if ( sphInterrupted() )
				{
					sphLogDebugv ( "SyncSockRead: got SIGTERM emulation on Windows, exit -1" );
					sphSockSetErrno ( EINTR );
					return -1;
				}

				// timeout might not be fully over just yet, so re-loop
				continue;
			}
#endif

			// in per-packet-timeout mode the deadline is extended from the last activity timestamp
			if ( g_bTimeoutEachPacket )
			{
				auto iTimeoutFromLastActivity = iLastTimestamp + iMaxTimerPeriodUS;
				auto iMonoTimer = MonoMicroTimer();
				if ( tmMaxTimer < iMonoTimer && iTimeoutFromLastActivity > iMonoTimer )
				{
					tmMaxTimer = iTimeoutFromLastActivity;
					continue;
				}
			}

			sphLogDebugv ( "return TIMEOUT, sock=%d", pSock->GetSocket() );
			sphSockSetErrno( ETIMEDOUT );
			return -1;
		}

		// try to receive next chunk
		iRes = AsyncRecvNBChunk( pSock, pBuf, iSpace );
		sphLogDebugv ( "SyncSockRead: AsyncRecvNBChunk returned %d, sock=%d", iRes, pSock->GetSocket() );

		// if there was eof, we're done
		if ( !iRes )
		{
			sphLogDebugv ( "SyncSockRead: connection reset, sock=%d", pSock->GetSocket() );
			sphSockSetErrno( ECONNRESET );
			return -1;
		}

		// if there was EINTR, retry
		// if any other error, bail
		if ( iRes==-1 )
		{
			// only let SIGTERM (of all them) to interrupt, and only if explicitly allowed
			iErr = sphSockGetErrno();
			if ( iErr==EINTR )
			{
				if ( !( sphInterrupted () && bIntr ))
					continue;
				sphLogDebugv( "SyncSockRead: select got SIGTERM, exit -1, sock=%d", pSock->GetSocket() );
			}
			return -1;
		}

		iReceived += iRes;
		iLen -= iRes;
		// avoid partial buffer loss in case of signal during the 2nd (!) read
		bIntr = false;
	}

	// if there was a timeout, report it as an error
	if ( iLen>0 )
	{
		// fixed typo in the debug message: "byt" -> "but"
		sphLogDebugv ( "SyncSockRead: at exit but still need to receive %d bytes, return TIMEOUT, sock=%d", iLen, pSock->GetSocket() );
		sphSockSetErrno( ETIMEDOUT );
		return -1;
	}
	return iReceived;
}
/////////////////////////////////////////////////////////////////////////////
/// AsyncNetInputBuffer_c
/////////////////////////////////////////////////////////////////////////////
/// construct over a small initial backing store (NET_MINIBUFFER_SIZE bytes),
/// sharing that storage with the InputBuffer_c view; starts logically empty
AsyncNetInputBuffer_c::AsyncNetInputBuffer_c ()
	: STORE ( NET_MINIBUFFER_SIZE )
	, InputBuffer_c ( m_pData, NET_MINIBUFFER_SIZE )
{
	Resize ( 0 );	// capacity reserved, but no payload yet
	m_iLen = 0;
}
/// sniff the first bytes of a fresh connection and guess the wire protocol.
/// May pull from the socket: first a non-blocking poke (iNeed=0), then, if that
/// yielded nothing, a blocking read of at least one byte.
Proto_e AsyncNetInputBuffer_c::Probe()
{
	Proto_e eResult = Proto_e::UNKNOWN;
	m_bIntr = false;
	int iRest = 0;
	if ( !HasBytes() )
	{
		iRest = Min ( NET_MINIBUFFER_SIZE, GetRoomForTail() );
		if ( !iRest )
			return eResult; // hard limit reached
		AppendData ( 0, iRest, true ); // iNeed=0: just grab whatever already arrived
	}

	auto iHas = HasBytes();
	if (!iHas)
	{
		sphLogDebugv ( "+++++ Light probing revealed nothing, try blocking" );
		AppendData ( 1, iRest, true ); // block until at least one byte shows up
		iHas = HasBytes ();
	}

	StringBuilder_c sBytes;	// human-readable verdict, used only for the debug log
	auto tBlob = Tail ();
	if ( tBlob.second >=4 )
	{
		// four bytes are enough to tell the supported protocols apart
		if ( !memcmp (tBlob.first,"\0\0\0\1",4) )
		{
			sBytes << "SphinxAPI, usual byte order";
			eResult = Proto_e::SPHINX;
		}
		else if ( !memcmp ( tBlob.first, "GET", 3)
			|| !memcmp ( tBlob.first, "POST", 4 )
			|| !memcmp ( tBlob.first, "PUT", 3 )
			|| !memcmp ( tBlob.first, "DELE", 4 )
			|| !memcmp ( tBlob.first, "HEAD", 4 ) )
		{
			eResult = Proto_e::HTTP;
			sBytes << "HTTP";
		}
		else if ( !memcmp ( tBlob.first, "\1\0\0\0", 4 ) )
		{
			sBytes << "SphinxAPI, inversed byte order";
			eResult = Proto_e::SPHINX;
		}
		else
		{
			eResult = Proto_e::HTTPS; // m.b. more accurate probe on ssl header, but not important right now
			sBytes << "Unknown, assume HTTPS";
		}
	} else
	{
		// too few bytes to decide; dump what we have for debugging
		sBytes.StartBlock ( " ", "Short [", "]" );
		for ( int i = 0; i<tBlob.second; ++i )
			sBytes << tBlob.first[i];
		sBytes.FinishBlocks();
	}
	sphLogDebugv ( "+++++ Probing revealed %d bytes: %s", iHas, sBytes.cstr() );
	return eResult;
}
/// ensure at least iLen bytes are buffered; false on oversized packet or I/O error
bool AsyncNetInputBuffer_c::ReadFrom( int iLen, bool bIntr )
{
	m_bIntr = false;

	if ( !IsLessMaxPacket ( iLen ) )
		return false;

	int iMissing = iLen - HasBytes();
	if ( iMissing<=0 ) // lazy case: a previous ReadFrom already buffered enough
		return true;

	int iAppended = AppendData ( iMissing, iMissing, bIntr );

	// short read without a backend error - synthesize a message ourselves
	bool bShort = iAppended<iMissing;
	if ( bShort && !GetError() )
		SetError ( "invalid size read %d(%d)", iMissing, iAppended );

	return !GetError();
}
// ensure iSpace bytes in buffer, then read at least iNeed, up to vector's GetLimit().
// returns -1 on error, or N of appended bytes.
int AsyncNetInputBuffer_c::AppendData ( int iNeed, int iSpace, bool bIntr )
{
	assert ( iNeed<=iSpace );
	int iGot = ReadFromBackend ( iNeed, iSpace, bIntr );

	// a pending SIGTERM overrides whatever the backend managed to read
	if ( sphInterrupted () && bIntr )
	{
		SetError ( "AsyncNetInputBuffer_c::AppendData: got SIGTERM, return -1" );
		sphLogDebugv ( "%s", GetErrorMessage().cstr() );
		m_bIntr = true;
		return -1;
	}

	if ( iGot==-1 )
	{
		auto iErr = sphSockPeekErrno ();
		m_bIntr = iErr==EINTR;	// remember interruption; timeouts/resets stay silent here
		if ( iErr!=ETIMEDOUT && iErr!=ECONNRESET ) // FIXME!!! connection timeout activated by timer skipped but for not the persist connection should reported up to the handler
		{
			SetError ( "AsyncNetInputBuffer_c::AppendData: error %d (%s) return -1", iErr, strerrorm ( iErr ) );
			sphLogDebugv ( "%s", GetErrorMessage().cstr() );
		}
		return -1;
	}

	// success: account for the freshly appended bytes
	AddN ( iGot );
	m_iLen = GetLength();
	m_bIntr = false;
	return iGot;
}
/// blocking read of whatever arrives next (at least one byte)
int AsyncNetInputBuffer_c::ReadAny ()
{
	m_bIntr = false;

	int iRoom = GetRoomForTail();
	if ( !iRoom )
		return 0;

	// ReadAny used only for HTTP header read (NET_MINIBUFFER_SIZE is enough for header)
	// and for initial HTTP fetch with the empty buffer - no need to allocate up to g_iMaxPacketSize
	bool bEmpty = !HasBytes();
	if ( bEmpty )
		iRoom = Min ( NET_MINIBUFFER_SIZE, iRoom );

	return AppendData ( 1, iRoom, true );
}
/// view over the not-yet-consumed part of the buffer
ByteBlob_t AsyncNetInputBuffer_c::Tail ()
{
	auto iAvail = HasBytes ();
	return ByteBlob_t { m_pCur, iAvail };
}
/// consume and return up to iSize unread bytes (all of them when iSize<0)
ByteBlob_t AsyncNetInputBuffer_c::PopTail ( int iSize )
{
	auto iChunk = HasBytes ();
	if ( iSize>=0 )
	{
		assert ( iSize <= iChunk );
		iChunk = iSize;
	}

	const BYTE * pChunk = nullptr;
	bool bGot = iChunk>0 && GetBytesZerocopy ( &pChunk, iChunk );
	if ( !bGot )
		return { nullptr, 0 };
	return { pChunk, iChunk };
}
/// free space left before the hard packet-size limit; compacts the buffer when full
int AsyncNetInputBuffer_c::GetRoomForTail()
{
	const int iLimit = GetMaxPacketSize();
	const bool bFull = ( iLimit<=m_iLen );
	if ( bFull )
		DiscardProcessed ( -1 ); // drops consumed bytes and refreshes m_iLen
	return iLimit-m_iLen;
}
/// make room for at least iSpace more bytes after the current payload.
/// The backing vector may reallocate, so m_pBuf/m_pCur are re-aligned afterwards;
/// returns a writable span starting right past the payload.
VecTraits_T<BYTE> AsyncNetInputBuffer_c::AllocateBuffer ( int iSpace )
{
	auto iPos = DiscardAndReserve ( int ( m_pCur - m_pBuf ), GetLength() + iSpace );
	m_pBuf = ByteBlob_t ( *this ).first; // realign after possible reserve, byteblob ensures it is not nullptr
	m_pCur = m_pBuf + iPos;
	return { AddN(0), GetLimit() - GetLength() };
}
/// drop already-consumed bytes from the head of the buffer.
/// iHowMany==-1 : drop everything up to the cursor;
/// iHowMany==0  : cheap reset - drop all, but ONLY when the whole buffer was consumed;
/// iHowMany>0   : drop exactly that many bytes ending at the cursor.
void AsyncNetInputBuffer_c::DiscardProcessed ( int iHowMany )
{
	auto iPos = int ( m_pCur-m_pBuf );	// N of bytes consumed so far
	assert ( m_iLen == GetLength() );
	assert ( iHowMany >=-1 ); // we don't even expect values less then -1.
	assert ( iPos <= m_iLen );
	auto iOldLen = m_iLen;
	switch ( iHowMany ) {
	case 0:
		if ( iPos==m_iLen )
		{
			// fully processed: plain reset, no memmove needed
			Resize(0);
			iHowMany = iPos;
		}
		break;
	case -1: iHowMany = iPos;
		// [[clang::fallthrough]];
	default:
		Remove ( iPos-iHowMany, iHowMany );
		break;
	}
	m_pCur -= iHowMany;
	m_iLen = STORE::GetLength();
	sphLogDebugv ( "DiscardProcessed(%d) iPos=%d->0, iLen=%d->%d, sock=%d", iHowMany, (int)iPos, iOldLen, m_iLen, ClientTaskInfo_t::Info().GetSocket() );
}
/// overwrite the byte at cursor+iPos with uNewVal and return the previous value.
/// If iPos points right past the allocated storage, grows it by one byte first,
/// re-aligning m_pBuf/m_pCur after the possible reallocation.
BYTE AsyncNetInputBuffer_c::Terminate ( int iPos, BYTE uNewVal )
{
	auto pPos = m_pCur + iPos;
	auto pLimit = m_pData + GetLimit();
	if ( pPos >= pLimit ) // no place for terminator
	{
		auto iIdx = m_pCur-m_pBuf;
		ReserveGap(1);
		m_pBuf = m_pData;
		m_pCur = m_pBuf+iIdx;
		pPos = m_pCur+iPos;
	}
	return std::exchange ( *const_cast<BYTE*> ( pPos ), uNewVal );
}
void AsyncNetBuffer_c::SyncErrorState()
{
if ( GenericOutputBuffer_c::GetError() )
{
assert ( !GenericOutputBuffer_c::GetErrorMessage().IsEmpty() );
InputBuffer_c::SetError ( "%s", GenericOutputBuffer_c::GetErrorMessage().cstr() );
}
}
/// clear error flags on both the input and the output side
void AsyncNetBuffer_c::ResetError()
{
	GenericOutputBuffer_c::ResetError();
	InputBuffer_c::ResetError();
}
/////////////////////////////////////////////////////////////////////////////
/// AsyncBufferedSocket_c - provides wrapper for sending and receiving
/////////////////////////////////////////////////////////////////////////////
class AsyncBufferedSocket_c final : public AsyncNetBuffer_c
{
	std::unique_ptr<SockWrapper_c> m_pSocket;	// owned transport wrapper

	// pull iNeed..iSpace bytes from the socket into the input buffer tail
	int ReadFromBackend ( int iNeed, int iSpace, bool bIntr ) final
	{
		assert ( iNeed<= iSpace );
		auto dBuf = AllocateBuffer ( iSpace );
		return SyncSockRead ( m_pSocket.get(), dBuf.begin(), iNeed, dBuf.GetLength(), bIntr );
	}

	// flush the accumulated output to the socket.
	// NOTE(review): dData is used only for the emptiness check - the payload actually
	// sent is m_dBuf; presumably callers always pass m_dBuf here. Verify at call sites.
	bool SendBuffer ( const VecTraits_T<BYTE> & dData ) final
	{
		assert ( m_pSocket );
		if ( dData.IsEmpty () )
			return true; // nothing to send
		CSphScopedProfile tProf ( m_pProfile, SPH_QSTATE_NET_WRITE );
		bool bSent = SyncSend ( m_pSocket.get(), (const char *) m_dBuf.begin(), m_dBuf.GetLength64(), GenericOutputBuffer_c::m_sError );
		GenericOutputBuffer_c::m_bError = !bSent;
		return bSent;
	}

public:
	explicit AsyncBufferedSocket_c ( std::unique_ptr<SockWrapper_c> pSock )
		: m_pSocket ( std::move ( pSock ) )
	{}

	// timeouts and traffic counters are proxied straight to the wrapped socket
	void SetWTimeoutUS ( int64_t iTimeoutUS ) final { m_pSocket->SetWTimeoutUS ( iTimeoutUS ); }
	int64_t GetWTimeoutUS () const final { return m_pSocket->GetWTimeoutUS (); }
	void SetTimeoutUS ( int64_t iTimeoutUS ) final { m_pSocket->SetTimeoutUS ( iTimeoutUS ); }
	int64_t GetTimeoutUS () const final { return m_pSocket->GetTimeoutUS (); }
	int64_t GetTotalSent() const final { return m_pSocket->GetTotalSent(); }
	int64_t GetTotalReceived() const final { return m_pSocket->GetTotalReceived(); }
};
// main fabric
// main fabric: wrap a raw socket into a buffered async net buffer
std::unique_ptr<AsyncNetBuffer_c> MakeAsyncNetBuffer ( std::unique_ptr<SockWrapper_c> pSock )
{
	auto pBuffered = std::make_unique<AsyncBufferedSocket_c> ( std::move ( pSock ) );
	return pBuffered;
}
/// log a network error with the current connection context;
/// bDebug routes it to the verbose debug log instead of the warning log
void LogNetError ( const char * sMsg, bool bDebug )
{
	const int iConn = session::GetConnID();
	const char * szPeer = session::szClientName();
	const int iSock = ClientTaskInfo_t::Info().GetSocket();

	if ( bDebug )
		sphLogDebugv ( "conn %s(%d), sock=%d: %s", szPeer, iConn, iSock, sMsg );
	else
		sphWarning ( "conn %s(%d), sock=%d: %s", szPeer, iConn, iSock, sMsg );
}
| 34,341
|
C++
|
.cpp
| 1,035
| 30.684058
| 180
| 0.678833
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,958
|
sphinxutils.cpp
|
manticoresoftware_manticoresearch/src/sphinxutils.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
/// @file sphinxutils.cpp
/// Implementations for Sphinx utilities shared classes.
#include "sphinxutils.h"
#include "sphinxint.h"
#include "sphinxplugin.h"
#include "sphinxstem.h"
#include "fileutils.h"
#include "threadutils.h"
#include "indexfiles.h"
#include "datetime.h"
#include <codecvt>
#include <ctype.h>
#include <fcntl.h>
#include <errno.h>
#if __has_include(<execinfo.h>)
#include <execinfo.h>
#endif
#include <sstream>
#include <iomanip>
#if _WIN32
#include <io.h> // for ::open on windows
#include <dbghelp.h>
#pragma comment(linker, "/defaultlib:dbghelp.lib")
#pragma message("Automatically linking with dbghelp.lib")
#else
#include <sys/wait.h>
#include <signal.h>
#include <glob.h>
#endif
#if __has_include(<sys/prctl.h>)
#define HAVE_SYS_PRCTL_H
#include <sys/prctl.h>
#endif
#include "libutils.h"
#include "coroutine.h"
#if __has_include (<malloc.h>)
#include <malloc.h>
#endif
#if __has_include(<jemalloc/jemalloc.h>)
#define HAVE_JEMALLOC_JEMALLOC_H
#include <jemalloc/jemalloc.h>
#endif
#if _WIN32
CSphString g_sWinInstallPath;
#endif
//////////////////////////////////////////////////////////////////////////
// STRING FUNCTIONS
//////////////////////////////////////////////////////////////////////////
/// skip leading whitespace; returns pointer into the same string
inline static char * ltrim ( char * sLine )
{
	char * p = sLine;
	while ( *p && sphIsSpace ( *p ) )
		++p;
	return p;
}
/// cut trailing whitespace in place; returns the original pointer
inline static char * rtrim ( char * sLine )
{
	char * pPastLast = sLine + strlen ( sLine );
	while ( pPastLast>sLine && sphIsSpace ( pPastLast[-1] ) )
		--pPastLast;
	*pPastLast = '\0';
	return sLine;
}
/// trim both ends; trailing part is cut in place, leading part by pointer shift
inline static char * trim ( char * sLine )
{
	char * sNoTail = rtrim ( sLine );
	return ltrim ( sNoTail );
}
// split alnums by non-alnums symbols
// (alnums are [0..9a..zA..Z-_])
// split alnums by non-alnums symbols
// (alnums are [0..9a..zA..Z-_])
void sphSplit ( StrVec_t & dOut, const char * sIn )
{
	if ( !sIn )
		return;

	for ( const char * p = sIn; *p; )
	{
		// skip separators
		while ( *p && !sphIsAlpha ( *p ) )
			++p;
		if ( !*p )
			return;

		// accumulate the token
		const char * pTokStart = p;
		while ( sphIsAlpha ( *p ) )
			++p;
		dOut.Add().SetBinary ( pTokStart, int ( p-pTokStart ) );
	}
}
/// tokenize [sIn, sIn+iSize) and invoke dFunc(token, len) per token.
/// BOUNDS=true : separators are the chars listed in sBounds;
/// BOUNDS=false: tokens are runs of sphIsAlpha() chars, sBounds is ignored.
template<bool BOUNDS>
void SplitApply_T ( const char * sIn, int iSize, const char * sBounds, StrFunctor && dFunc )
{
	if ( !sIn )
		return;
	if (!dFunc)
		return;

	const char * p = sIn;
	if ( iSize<0 ) iSize = (int) strlen (p);	// negative size means NUL-terminated input
	const char * pEnd = p + iSize;
	while ( p < pEnd )
	{
		// skip non-alphas
		if constexpr ( BOUNDS )
		{
			while ( ( p<pEnd ) && strchr ( sBounds, *p )!=nullptr )
				p++;
		} else
		{
			while ( ( p<pEnd ) && !sphIsAlpha ( *p ) )
				p++;
		}
		if ( p>=pEnd )
			break;

		// this is my next token
		assert ( sphIsAlpha ( *p ) || ( sBounds && strchr ( sBounds, *p )==nullptr ) );
		const char * sNext = p;
		if constexpr ( BOUNDS )
		{
			while ( ( p<pEnd ) && strchr ( sBounds, *p )==nullptr )
				++p;
		} else
		{
			while ( ( p<pEnd ) && sphIsAlpha ( *p ) )
				++p;
		}
		if ( sNext!=p )
			dFunc ( sNext, int ( p - sNext ) );
	}
}
/// alnum flavor: tokens are runs of sphIsAlpha() chars
void sphSplitApply ( const char * sIn, int iSize, StrFunctor && dFunc )
{
	SplitApply_T<false> ( sIn, iSize, nullptr, std::move( dFunc ) );
}
/// custom-separator flavor: any char from sBounds splits tokens
void sphSplitApply ( const char * sIn, int iSize, const char * sBounds, StrFunctor && dFunc )
{
	SplitApply_T<true> ( sIn, iSize, sBounds, std::move ( dFunc ) );
}
// split by any char from sBounds.
// if line starts from a bound char, first splitted str will be an empty string
/// collect split tokens into dOut as owned strings
void sphSplit ( StrVec_t & dOut, const char * sIn, int iLen, const char * sBounds )
{
	sph::Split ( sIn, iLen, sBounds, [&] ( const char * sToken, int iTokenLen ) {
		dOut.Add ().SetBinary ( sToken, iTokenLen );
	} );
}
/// value-returning convenience overload
StrVec_t sphSplit ( const char * sIn, int iLen, const char * sBounds )
{
	StrVec_t dTokens;
	sphSplit ( dTokens, sIn, iLen, sBounds );
	return dTokens;
}
/// NUL-terminated input overload
void sphSplit ( StrVec_t & dOut, const char * sIn, const char * sBounds )
{
	sphSplit ( dOut, sIn, -1, sBounds );
}
/// NUL-terminated input, value-returning overload
StrVec_t sphSplit( const char* sIn, const char* sBounds )
{
	return sphSplit ( sIn, -1, sBounds);
}
// split by any char from sBounds.
// if line starts from a bound char, first splitted str will be an empty string
// split by any char from sBounds.
// if line starts from a bound char, first splitted str will be an empty string
/// collects non-owning {ptr,len} views into dOut
void sph::Split ( StrtVec_t& dOut, const char* sIn, int iLen, const char* sBounds )
{
	sph::Split ( sIn, iLen, sBounds, [&] ( const char* sToken, int iTokenLen ) {
		dOut.Add ( { sToken, iTokenLen } );
	} );
}
/// value-returning flavor of the view-collecting split
StrtVec_t sph::Split ( const char* sIn, int iLen, const char* sBounds )
{
	StrtVec_t dViews;
	sph::Split ( dViews, sIn, iLen, sBounds );
	return dViews;
}
/// NUL-terminated input overload
void sph::Split ( StrtVec_t& dOut, const char* sIn, const char* sBounds )
{
	sph::Split ( dOut, sIn, -1, sBounds );
}
/// NUL-terminated input, value-returning overload
StrtVec_t sph::Split ( const char* sIn, const char* sBounds )
{
	return sph::Split ( sIn, -1, sBounds );
}
/// trim whitespace from both ends of a non-owning blob; returns the shrunken view.
/// BUGFIX: the trailing loop used to dereference *sEnd, i.e. one byte PAST the end
/// of the blob (out-of-bounds read); for NUL-terminated input that byte is '\0',
/// so trailing whitespace was never stripped at all. We now inspect sEnd[-1],
/// the actual last character, keeping sEnd an exclusive end pointer.
Str_t sph::Trim ( Str_t tIn )
{
	if ( IsEmpty ( tIn ) )
		return tIn;

	const char* sStart = tIn.first;
	const char* sEnd = sStart + tIn.second;	// exclusive end

	while ( sStart < sEnd && isspace ( (unsigned char)*sStart ) )
		++sStart;
	while ( sStart < sEnd && isspace ( (unsigned char)sEnd[-1] ) )
		--sEnd;

	return { sStart, sEnd - sStart };
}
/// trim a specific garbage char from both ends of a non-owning blob.
/// BUGFIX: as in the whitespace overload, the trailing loop compared cGarbage
/// against *sEnd - one byte past the end of the blob (out-of-bounds read), so
/// trailing garbage was effectively never stripped. Now checks sEnd[-1].
Str_t sph::Trim ( Str_t tIn, char cGarbage )
{
	if ( IsEmpty ( tIn ) )
		return tIn;

	const char* sStart = tIn.first;
	const char* sEnd = sStart + tIn.second;	// exclusive end

	while ( sStart < sEnd && cGarbage == *sStart )
		++sStart;
	while ( sStart < sEnd && cGarbage == sEnd[-1] )
		--sEnd;

	return { sStart, sEnd - sStart };
}
/// recursive wildcard matcher; '*' = any run, '?' = exactly one char,
/// '%' = zero or one char, '\\' escapes the next pattern char.
/// Exponential in the worst case - sphWildcardMatchSpec() switches to the DP
/// variant for star-heavy patterns.
template < typename T1, typename T2 >
static bool sphWildcardMatchRec ( const T1 * sString, const T2 * sPattern )
{
	if ( !sString || !sPattern )
		return false;

	const T1 * s = sString;
	const T2 * p = sPattern;
	while ( *s )
	{
		switch ( *p )
		{
		case '\\':
			// escaped char, strict match the next one literally
			p++;
			if ( *s++!=*p++ )
				return false;
			break;

		case '?':
			// match any character
			s++;
			p++;
			break;

		case '%':
			// gotta match either 0 or 1 characters
			// well, lets look ahead and see what we need to match next
			p++;

			// just a shortcut, %* can be folded to just *
			if ( *p=='*' )
				break;

			// plain char after a hash? check the non-ambiguous cases
			if ( !sphIsWild(*p) )
			{
				if ( s[0]!=*p )
				{
					// hash does not match 0 chars
					// check if we can match 1 char, or it's a no-match
					if ( s[1]!=*p )
						return false;
					s++;
					break;
				} else
				{
					// hash matches 0 chars
					// check if we could ambiguously match 1 char too, though
					if ( s[1]!=*p )
						break;
					// well, fall through to "scan both options" route
				}
			}

			// could not decide yet
			// so just recurse both options
			if ( sphWildcardMatchRec ( s, p ) || sphWildcardMatchRec ( s+1, p ) )
				return true;
			return false;

		case '*':
			// skip all the extra stars and question marks
			// each '?' folded into the star still consumes one source char
			for ( p++; *p=='*' || *p=='?'; p++ )
				if ( *p=='?' )
				{
					s++;
					if ( !*s )
						return p[1]=='\0';
				}

			// short-circuit trailing star
			if ( !*p )
				return true;

			// so our wildcard expects a real character
			// scan forward for its occurrences and recurse
			while (true)
			{
				if ( !*s )
					return false;
				if ( *s==*p && sphWildcardMatchRec ( s+1, p+1 ) )
					return true;
				s++;
			}
			break;

		default:
			// default case, strict match
			if ( *s++!=*p++ )
				return false;
			break;
		}
	}

	// eliminate trailing stars
	while ( *p=='*' )
		p++;

	// string done
	// pattern should be either done too, or a trailing star, or a trailing hash
	return p[0]=='\0'
		|| ( p[0]=='*' && p[1]=='\0' )
		|| ( p[0]=='%' && p[1]=='\0' );
}
/// dynamic-programming wildcard matcher; same wildcard semantics as the
/// recursive variant but polynomial time. Keeps only two rolling rows of the
/// (pattern x string) match table, indexed by pattern position modulo 2.
/// Escape chars ('\\') are counted in iEsc so the row index stays in step with
/// the effective (unescaped) pattern position.
template < typename T1, typename T2 >
static bool sphWildcardMatchDP ( const T1 * sString, const T2 * sPattern )
{
	assert ( sString && sPattern && *sString && *sPattern );

	const T1 * s = sString;
	const T2 * p = sPattern;
	bool bEsc = false;
	int iEsc = 0;

	const int iBufCount = 2;	// rolling window: current row + previous row
	const int iBufLenMax = SPH_MAX_WORD_LEN*3+4+1;
	int dTmp [iBufCount][iBufLenMax];
	dTmp[0][0] = 1;
	dTmp[1][0] = 0;
	for ( int i=0; i<iBufLenMax; i++ )
		dTmp[0][i] = 1;

	while ( *p )
	{
		// count, flag and skip escape char
		if ( *p=='\\' )
		{
			iEsc++;
			p++;
			bEsc = true;
			continue;
		}

		s = sString;
		int iPattern = int (p - sPattern) + 1 - iEsc;	// effective pattern position
		int iPrev = ( iPattern + 1 ) % iBufCount;
		int iCur = iPattern % iBufCount;

		// check the 1st wildcard
		// (only '*'/'%' can match an empty string prefix)
		if ( !bEsc && ( *p=='*' || *p=='%' ) )
		{
			dTmp[iCur][0] = dTmp[iPrev][0];
		} else
		{
			dTmp[iCur][0] = 0;
		}

		while ( *s )
		{
			int j = int (s - sString) + 1;
			if ( !bEsc && *p=='*' )
			{
				// star: match empty, one, or extend a previous match
				dTmp[iCur][j] = dTmp[iPrev][j-1] || dTmp[iCur][j-1] || dTmp[iPrev][j];
			} else if ( !bEsc && *p=='%' )
			{
				// hash: match exactly zero or one char
				dTmp[iCur][j] = dTmp[iPrev][j-1] || dTmp[iPrev][j];
			} else if ( *p==*s || ( !bEsc && *p=='?' ) )
			{
				// literal (or '?'): consume one matching char
				dTmp[iCur][j] = dTmp[iPrev][j-1];
			} else
			{
				dTmp[iCur][j] = 0;
			}
			s++;
		}
		p++;
		bEsc = false;
	}

	return ( dTmp[( p-sPattern-iEsc ) % iBufCount][s-sString]!=0 );
}
/// dispatch between the recursive and the DP matcher.
/// Patterns dense with '*' blow up the recursive matcher, so those go to DP.
template < typename T1, typename T2 >
bool sphWildcardMatchSpec ( const T1 * sString, const T2 * sPattern )
{
	int iPatLen = 0;
	int iStarCount = 0;
	for ( const T2 * p = sPattern; *p; ++p )
	{
		++iPatLen;
		if ( *p=='*' )
			++iStarCount;
	}

	const bool bHeavy = iStarCount>10 || ( iStarCount>5 && iPatLen>17 );
	return bHeavy
		? sphWildcardMatchDP ( sString, sPattern )
		: sphWildcardMatchRec ( sString, sPattern );
}
/// wildcard matching entry point.
/// pPattern is an optional precalculated wide-char (unpacked utf-8) pattern;
/// the string side is unpacked here on demand when it contains utf-8.
bool sphWildcardMatch ( const char * sString, const char * sPattern, const int * pPattern )
{
	if ( !sString || !sPattern || !*sString || !*sPattern )
		return false;

	// there are basically 4 codepaths, because both string and pattern may or may not contain utf-8 chars
	// pPattern and pString are pointers to unpacked utf-8, pPattern can be precalculated (default is NULL)
	int dString [ SPH_MAX_WORD_LEN + 1 ];
	const int * pString = ( sphIsUTF8 ( sString ) && sphUTF8ToWideChar ( sString, dString, SPH_MAX_WORD_LEN ) ) ? dString : nullptr;

	if ( !pString && !pPattern )
		return sphWildcardMatchSpec ( sString, sPattern ); // ascii vs ascii
	if ( pString && !pPattern )
		return sphWildcardMatchSpec ( pString, sPattern ); // utf-8 vs ascii
	if ( !pString && pPattern )
		return sphWildcardMatchSpec ( sString, pPattern ); // ascii vs utf-8

	// if ( pString && pPattern )
	return sphWildcardMatchSpec ( pString, pPattern ); // utf-8 vs utf-8
	// return false; // dead, but causes warn either by compiler, either by analysis. Leave as is.
}
/// true when sVal contains any wildcard metacharacter ('*', '?' or '%')
bool HasWildcard ( const char * sVal )
{
	if ( !sVal )
		return false;

	for ( const char * p = sVal; *p; ++p )
	{
		const char c = *p;
		if ( c=='*' || c=='?' || c=='%' )
			return true;
	}
	return false;
}
//////////////////////////////////////////////////////////////////////////
// cases are covered by TEST (functions, size_parser) gtest_functions.cpp
// cases are covered by TEST (functions, size_parser) gtest_functions.cpp
/// parse a size spec like "128", "32k", "4M", "2g", "1T" (factor 1024 per step).
/// On an unknown suffix writes its address to *ppErr (if given) and returns iDefault.
int64_t sphGetSize64 ( const char * sValue, char ** ppErr, int64_t iDefault )
{
	if ( !sValue || !*sValue )
		return iDefault;

	char * sSuffix = nullptr;
	int64_t iBase = strtoll ( sValue, &sSuffix, 10 );

	const int64_t KB = 1024;
	int64_t iScale = -1;
	switch ( *sSuffix )
	{
		case '\0':			iScale = 1; break;
		case 'k': case 'K':	iScale = KB; break;
		case 'm': case 'M':	iScale = KB*KB; break;
		case 'g': case 'G':	iScale = KB*KB*KB; break;
		case 't': case 'T':	iScale = KB*KB*KB*KB; break;
		default: break;
	}

	if ( iScale<0 )
	{
		// an error happened; write address to ppErr
		if ( ppErr )
			*ppErr = sSuffix;
		return iDefault;
	}
	return iBase * iScale;
}
// cases are covered by TEST ( functions, time_parser ) gtest_functions.cpp
// cases are covered by TEST ( functions, time_parser ) gtest_functions.cpp
/// parse a time spec into MICROSECONDS; suffixes: us, ms, s (default), m, h, d, w.
/// On an unknown suffix writes its address to *ppErr (if given) and returns iDefault.
int64_t sphGetTime64 ( const char* sValue, char** ppErr, int64_t iDefault )
{
	if ( !sValue || !*sValue )
		return iDefault;

	char* sSuffix = nullptr;
	int64_t iBase = strtoll ( sValue, &sSuffix, 10 );

	bool bError = false;
	int64_t iScale = 1;	// microseconds per unit
	switch ( *sSuffix )
	{
		case 'w': case 'W': iScale = 7LL * 24 * 3600 * 1000000; break;
		case 'd': case 'D': iScale = 24LL * 3600 * 1000000; break;
		case 'h': case 'H': iScale = 3600LL * 1000000; break;
		case 'm': case 'M':
			// "ms" is milliseconds, bare "m" is minutes
			iScale = ( sSuffix[1]=='s' || sSuffix[1]=='S' ) ? 1000 : 60LL * 1000000;
			break;
		case 'u': case 'U':
			// only "us" is valid here; scale stays 1
			if ( sSuffix[1]!='s' && sSuffix[1]!='S' )
				bError = true;
			break;
		case 's': case 'S': case '\0': iScale = 1000000; break; // sec is default
		default: bError = true; break;
	}

	if ( bError )
	{
		if ( ppErr )
			*ppErr = sSuffix;
		return iDefault;
	}
	return iBase * iScale;
}
/// add a key=value pair to the section.
/// A key flagged m_bTag (presumably a value inherited from elsewhere - verify at
/// the call sites that set m_bTag) gets overridden outright; otherwise a repeated
/// key is chained to the tail of its CSphVariant list, preserving file order.
void CSphConfigSection::AddEntry ( const char * szKey, const char * szValue )
{
	int iTag = m_iTag;	// per-section monotonic tag, remembers insertion order
	m_iTag++;
	if ( (*this)(szKey) )
	{
		if ( (*this)[szKey].m_bTag )
		{
			// override value or list with a new value
			SafeDelete ( (*this)[szKey].m_pNext ); // only leave the first array element
			(*this)[szKey] = CSphVariant ( szValue, iTag ); // update its value
			(*this)[szKey].m_bTag = false; // mark it as overridden
		} else
		{
			// chain to tail, to keep the order
			CSphVariant * pTail = &(*this)[szKey];
			while ( pTail->m_pNext )
				pTail = pTail->m_pNext;
			pTail->m_pNext = new CSphVariant ( szValue, iTag );
		}
	} else
	{
		// just add
		Add ( CSphVariant ( szValue, iTag ), szKey ); // FIXME! be paranoid, verify that it returned true
	}
}
/// fetch a size option (with k/m/g/t suffixes); falls back to iDefault
/// when the key is missing or fails to parse (with a warning)
int64_t CSphConfigSection::GetSize64 ( const char * sKey, int64_t iDefault ) const
{
	CSphVariant * pEntry = (*this)( sKey );
	if ( !pEntry )
	{
		sphLogDebug ( "'%s' - nothing specified, using default value " INT64_FMT, sKey, iDefault );
		return iDefault;
	}

	char * sErr = nullptr;
	int64_t iParsed = sphGetSize64 ( pEntry->cstr(), &sErr, iDefault );

	bool bBadTail = sErr && *sErr;
	if ( bBadTail )
	{
		sphWarning ( "'%s = %s' parse error '%s'", sKey, pEntry->cstr(), sErr );
		return iDefault;
	}
	return iParsed;
}
int CSphConfigSection::GetSize ( const char * sKey, int iDefault ) const
{
int64_t iSize = GetSize64 ( sKey, iDefault );
if ( iSize>INT_MAX )
{
sphWarning ( "'%s = " INT64_FMT "' clamped to %d(INT_MAX)", sKey, iSize, INT_MAX );
iSize = INT_MAX;
}
return (int)iSize;
}
/// fetch a time option in microseconds; a bare number means seconds
int64_t CSphConfigSection::GetUsTime64S ( const char* sKey, int64_t iDefault ) const
{
	CSphVariant* pEntry = ( *this ) ( sKey );
	if ( !pEntry )
	{
		sphLogDebug ( "'%s' - nothing specified, using default value " INT64_FMT, sKey, iDefault );
		return iDefault;
	}

	char* sErr = nullptr;
	int64_t iUs = sphGetTime64 ( pEntry->cstr (), &sErr, iDefault );

	bool bBadTail = sErr && *sErr;
	if ( bBadTail )
	{
		sphWarning ( "'%s = %s' parse error '%s'", sKey, pEntry->cstr (), sErr );
		return iDefault;
	}
	return iUs;
}
/// fetch a time option in microseconds; a bare number means milliseconds
/// (implemented by appending "ms" before parsing, so explicit suffixes still win)
int64_t CSphConfigSection::GetUsTime64Ms ( const char* sKey, int64_t iDefault ) const
{
	CSphVariant* pEntry = ( *this ) ( sKey );
	if ( !pEntry )
	{
		sphLogDebug ( "'%s' - nothing specified, using default value " INT64_FMT, sKey, iDefault );
		return iDefault;
	}

	StringBuilder_c sWithUnit;
	sWithUnit << pEntry->strval () << "ms";

	char* sErr = nullptr;
	int64_t iUs = sphGetTime64 ( sWithUnit.cstr (), &sErr, iDefault );

	bool bBadTail = sErr && *sErr;
	if ( bBadTail )
	{
		sphWarning ( "'%s = %s' parse error '%s'", sKey, sWithUnit.cstr (), sErr );
		return iDefault;
	}
	return iUs;
}
int CSphConfigSection::GetSTimeS ( const char* sKey, int iDefault ) const
{
int64_t iTimeUs = GetUsTime64S ( sKey, iDefault*1000000 ) / 1000000ll;
if ( iTimeUs>INT_MAX )
{
sphWarning ( "'%s = " INT64_FMT "' clamped to %d(INT_MAX)", sKey, iTimeUs, INT_MAX );
iTimeUs = INT_MAX;
}
return ( int ) iTimeUs;
}
int CSphConfigSection::GetMsTimeMs ( const char* sKey, int iDefault ) const
{
int64_t iTimeUs = GetUsTime64Ms ( sKey, iDefault*1000 ) / 1000ll;
if ( iTimeUs>INT_MAX )
{
sphWarning ( "'%s = " INT64_FMT "' clamped to %d(INT_MAX)", sKey, iTimeUs, INT_MAX );
iTimeUs = INT_MAX;
}
return ( int ) iTimeUs;
}
//////////////////////////////////////////////////////////////////////////
// CONFIG PARSER
//////////////////////////////////////////////////////////////////////////
/// key flags
enum
{
	KEY_DEPRECATED	= 1UL<<0,	// still accepted; m_sExtra names the replacement
	KEY_LIST		= 1UL<<1,	// key may be specified multiple times
	KEY_HIDDEN		= 1UL<<2,	// intentionally undocumented key
	KEY_REMOVED		= 1UL<<3	// no longer supported
};
/// key descriptor for validation purposes
struct KeyDesc_t
{
	const char *	m_sKey;		///< key name
	int				m_iFlags;	///< flags (KEY_... bitmask)
	const char *	m_sExtra;	///< extra stuff (deprecated name, for now)
};
/// Mandatory pattern: KeyDesc_t g_dKeys...
/// It is searched by doc/check.pl when parsing the file
/// and used to determine and collect all options
/// allowed keys for source section
// NOTE: doc/check.pl greps these tables - keep one entry per line, NULL-terminated
static KeyDesc_t g_dKeysSource[] =
{
	{ "type", 0, NULL },
	{ "sql_host", 0, NULL },
	{ "sql_user", 0, NULL },
	{ "sql_pass", 0, NULL },
	{ "sql_db", 0, NULL },
	{ "sql_port", 0, NULL },
	{ "sql_sock", 0, NULL },
	{ "mysql_connect_flags", 0, NULL },
	{ "mysql_ssl_key", 0, NULL }, // check.pl mysql_ssl
	{ "mysql_ssl_cert", 0, NULL }, // check.pl mysql_ssl
	{ "mysql_ssl_ca", 0, NULL }, // check.pl mysql_ssl
	{ "mssql_winauth", 0, NULL },
	{ "mssql_unicode", KEY_REMOVED, NULL },
	{ "sql_query_pre", KEY_LIST, NULL },
	{ "sql_query_pre_all", KEY_LIST, NULL },
	{ "sql_query", 0, NULL },
	{ "sql_query_range", 0, NULL },
	{ "sql_range_step", 0, NULL },
	{ "sql_query_killlist", 0, NULL },
	{ "sql_attr_uint", KEY_LIST, NULL },
	{ "sql_attr_bool", KEY_LIST, NULL },
	{ "sql_attr_timestamp", KEY_LIST, NULL },
	{ "sql_attr_str2ordinal", KEY_REMOVED | KEY_LIST, NULL },
	{ "sql_attr_float", KEY_LIST, NULL },
	{ "sql_attr_bigint", KEY_LIST, NULL },
	{ "sql_attr_multi", KEY_LIST, NULL },
	{ "sql_query_post", KEY_LIST, NULL },
	{ "sql_query_post_index", KEY_LIST, NULL },
	{ "sql_ranged_throttle", 0, NULL },
	{ "sql_query_info", KEY_REMOVED, NULL },
	{ "xmlpipe_command", 0, NULL },
	{ "xmlpipe_field", KEY_LIST, NULL },
	{ "xmlpipe_attr_uint", KEY_LIST, NULL },
	{ "xmlpipe_attr_timestamp", KEY_LIST, NULL },
	{ "xmlpipe_attr_str2ordinal", KEY_REMOVED | KEY_LIST, NULL },
	{ "xmlpipe_attr_bool", KEY_LIST, NULL },
	{ "xmlpipe_attr_float", KEY_LIST, NULL },
	{ "xmlpipe_attr_bigint", KEY_LIST, NULL },
	{ "xmlpipe_attr_multi", KEY_LIST, NULL },
	{ "xmlpipe_attr_multi_64", KEY_LIST, NULL },
	{ "xmlpipe_attr_string", KEY_LIST, NULL },
	{ "xmlpipe_attr_wordcount", KEY_REMOVED | KEY_LIST, NULL },
	{ "xmlpipe_attr_json", KEY_LIST, NULL },
	{ "xmlpipe_field_string", KEY_LIST, NULL },
	{ "xmlpipe_field_wordcount", KEY_REMOVED | KEY_LIST, NULL },
	{ "xmlpipe_fixup_utf8", 0, NULL },
	{ "sql_str2ordinal_column", KEY_LIST | KEY_REMOVED, NULL },
	{ "unpack_zlib", KEY_LIST, NULL },
	{ "unpack_mysqlcompress", KEY_LIST, NULL },
	{ "unpack_mysqlcompress_maxsize", 0, NULL },
	{ "odbc_dsn", 0, NULL },
	{ "sql_joined_field", KEY_LIST, NULL },
	{ "sql_attr_string", KEY_LIST, NULL },
	{ "sql_attr_str2wordcount", KEY_REMOVED | KEY_LIST, NULL },
	{ "sql_field_string", KEY_LIST, NULL },
	{ "sql_field_str2wordcount", KEY_REMOVED | KEY_LIST, NULL },
	{ "sql_file_field", KEY_LIST, NULL },
	{ "sql_column_buffers", 0, NULL },
	{ "sql_attr_json", KEY_LIST, NULL },
	{ "hook_connect", KEY_HIDDEN, NULL },
	{ "hook_query_range", KEY_HIDDEN, NULL },
	{ "hook_post_index", KEY_HIDDEN, NULL },
	{ "tsvpipe_command", 0, NULL },
	{ "tsvpipe_field", KEY_LIST, NULL },
	{ "tsvpipe_attr_uint", KEY_LIST, NULL },
	{ "tsvpipe_attr_timestamp", KEY_LIST, NULL },
	{ "tsvpipe_attr_bool", KEY_LIST, NULL },
	{ "tsvpipe_attr_float", KEY_LIST, NULL },
	{ "tsvpipe_attr_bigint", KEY_LIST, NULL },
	{ "tsvpipe_attr_multi", KEY_LIST, NULL },
	{ "tsvpipe_attr_multi_64", KEY_LIST, NULL },
	{ "tsvpipe_attr_string", KEY_LIST, NULL },
	{ "tsvpipe_attr_json", KEY_LIST, NULL },
	{ "tsvpipe_field_string", KEY_LIST, NULL },
	{ "csvpipe_command", 0, NULL },
	{ "csvpipe_field", KEY_LIST, NULL },
	{ "csvpipe_attr_uint", KEY_LIST, NULL },
	{ "csvpipe_attr_timestamp", KEY_LIST, NULL },
	{ "csvpipe_attr_bool", KEY_LIST, NULL },
	{ "csvpipe_attr_float", KEY_LIST, NULL },
	{ "csvpipe_attr_bigint", KEY_LIST, NULL },
	{ "csvpipe_attr_multi", KEY_LIST, NULL },
	{ "csvpipe_attr_multi_64", KEY_LIST, NULL },
	{ "csvpipe_attr_string", KEY_LIST, NULL },
	{ "csvpipe_attr_json", KEY_LIST, NULL },
	{ "csvpipe_field_string", KEY_LIST, NULL },
	{ "csvpipe_delimiter", 0, NULL },
	{ NULL, 0, NULL }
};
/// allowed keys for index section
// NOTE: doc/check.pl greps these tables - keep one entry per line, nullptr-terminated
static KeyDesc_t g_dKeysIndex[] =
{
	{ "source", KEY_LIST, NULL },
	{ "path", 0, NULL },
	{ "docinfo", KEY_REMOVED, NULL },
	{ "mlock", KEY_DEPRECATED, "mlock in particular access_... option" },
	{ "morphology", 0, NULL },
	{ "stopwords", 0, NULL },
	{ "exceptions", 0, NULL },
	{ "wordforms", KEY_LIST, NULL },
	{ "embedded_limit", 0, NULL },
	{ "min_word_len", 0, NULL },
	{ "charset_type", KEY_REMOVED, NULL },
	{ "charset_table", 0, NULL },
	{ "ignore_chars", 0, NULL },
	{ "min_prefix_len", 0, NULL },
	{ "min_infix_len", 0, NULL },
	{ "max_substring_len", KEY_DEPRECATED, "dict=keywords" },
	{ "prefix_fields", 0, NULL },
	{ "infix_fields", 0, NULL },
	{ "enable_star", KEY_REMOVED, NULL },
	{ "ngram_len", 0, NULL },
	{ "ngram_chars", 0, NULL },
	{ "phrase_boundary", 0, NULL },
	{ "phrase_boundary_step", 0, NULL },
	{ "ondisk_dict", KEY_REMOVED, NULL },
	{ "type", 0, NULL },
	{ "local", KEY_LIST, NULL },
	{ "agent", KEY_LIST, NULL },
	{ "agent_blackhole", KEY_LIST, NULL },
	{ "agent_persistent", KEY_LIST, NULL },
	{ "agent_retry_count", 0, NULL },
	{ "mirror_retry_count", 0, NULL },
	{ "agent_connect_timeout", 0, NULL },
	{ "ha_strategy", 0, NULL },
	{ "agent_query_timeout", 0, NULL },
	{ "html_strip", 0, NULL },
	{ "html_index_attrs", 0, NULL },
	{ "html_remove_elements", 0, NULL },
	{ "preopen", 0, NULL },
	{ "inplace_enable", 0, NULL },
	{ "inplace_hit_gap", 0, NULL },
	{ "inplace_docinfo_gap", KEY_REMOVED, NULL },
	{ "inplace_reloc_factor", 0, NULL },
	{ "inplace_write_factor", 0, NULL },
	{ "index_exact_words", 0, NULL },
	{ "min_stemming_len", 0, NULL },
	{ "overshort_step", 0, NULL },
	{ "stopword_step", 0, NULL },
	{ "blend_chars", 0, NULL },
	{ "expand_keywords", 0, NULL },
	{ "hitless_words", 0, NULL },
	{ "hit_format", KEY_HIDDEN | KEY_DEPRECATED, "default value" },
	{ "rt_field", KEY_LIST, NULL },
	{ "rt_attr_uint", KEY_LIST, NULL },
	{ "rt_attr_bigint", KEY_LIST, NULL },
	{ "rt_attr_float", KEY_LIST, NULL },
	{ "rt_attr_float_vector", KEY_LIST, NULL },
	{ "rt_attr_timestamp", KEY_LIST, NULL },
	{ "rt_attr_string", KEY_LIST, NULL },
	{ "rt_attr_multi", KEY_LIST, NULL },
	{ "rt_attr_multi_64", KEY_LIST, NULL },
	{ "rt_attr_json", KEY_LIST, NULL },
	{ "rt_attr_bool", KEY_LIST, NULL },
	{ "rt_mem_limit", 0, NULL },
	{ "dict", 0, NULL },
	{ "index_sp", 0, NULL },
	{ "index_zones", 0, NULL },
	{ "blend_mode", 0, NULL },
	{ "regexp_filter", KEY_LIST, NULL },
	{ "bigram_freq_words", 0, NULL },
	{ "bigram_index", 0, NULL },
	{ "index_field_lengths", 0, NULL },
	{ "divide_remote_ranges", KEY_HIDDEN, NULL },
	{ "stopwords_unstemmed", 0, NULL },
	{ "global_idf", 0, NULL },
	{ "rlp_context", KEY_REMOVED, NULL },
	{ "ondisk_attrs", KEY_DEPRECATED, "access_plain_attrs = mmap" },
	{ "index_token_filter", 0, NULL },
	{ "morphology_skip_fields", 0, NULL },
	{ "killlist_target", 0, nullptr },
	{ "read_buffer_docs", 0, nullptr },
	{ "read_buffer_hits", 0, nullptr },
	{ "read_buffer_columnar", 0, nullptr },
	{ "read_unhinted", 0, nullptr },
	{ "attr_update_reserve", 0, nullptr },
	{ "access_plain_attrs", 0, nullptr },
	{ "access_blob_attrs", 0, nullptr },
	{ "access_doclists", 0, nullptr },
	{ "access_hitlists", 0, nullptr },
	{ "access_dict", 0, nullptr },
	{ "stored_fields", 0, nullptr },
	{ "stored_only_fields", 0, nullptr },
	{ "docstore_block_size", 0, nullptr },
	{ "docstore_compression", 0, nullptr },
	{ "docstore_compression_level", 0, nullptr },
	{ "columnar_attrs", 0, nullptr },
	{ "columnar_no_fast_fetch", 0, nullptr },
	{ "rowwise_attrs", 0, nullptr },
	{ "columnar_strings_no_hash", 0, nullptr },
	{ "columnar_compression_uint32", KEY_REMOVED, nullptr },
	{ "columnar_compression_int64", KEY_REMOVED, nullptr },
	{ "columnar_subblock", KEY_REMOVED, nullptr },
	{ "optimize_cutoff", 0, nullptr },
	{ "engine_default", 0, nullptr },
	{ "knn", 0, nullptr },
	{ "json_secondary_indexes", 0, nullptr },
	{ "jieba_hmm", 0, nullptr },
	{ "jieba_mode", 0, nullptr },
	{ "jieba_user_dict_path", 0, nullptr },
	{ nullptr, 0, nullptr }
};
/// allowed keys for indexer section
// NOTE: doc/check.pl greps these tables - keep one entry per line, NULL-terminated
static KeyDesc_t g_dKeysIndexer[] =
{
	{ "mem_limit", 0, NULL },
	{ "max_iops", 0, NULL },
	{ "max_iosize", 0, NULL },
	{ "max_xmlpipe2_field", 0, NULL },
	{ "max_file_field_buffer", 0, NULL },
	{ "write_buffer", 0, NULL },
	{ "on_file_field_error", 0, NULL },
	{ "on_json_attr_error", KEY_DEPRECATED, "on_json_attr_error in common{..} section" },
	{ "json_autoconv_numbers", KEY_DEPRECATED, "json_autoconv_numbers in common{..} section" },
	{ "json_autoconv_keynames", KEY_DEPRECATED, "json_autoconv_keynames in common{..} section" },
	{ "lemmatizer_cache", 0, NULL },
	{ "ignore_non_plain", 0, NULL },
	{ NULL, 0, NULL }
};
/// allowed keys for searchd section
/// (each entry: key name, flags - deprecated/removed/list/hidden, extra text for deprecation messages;
/// NULL-key row terminates the table; consulted by CSphConfigParser::ValidateKey)
static KeyDesc_t g_dKeysSearchd[] =
{
	{ "address",				KEY_REMOVED, NULL },
	{ "port",					KEY_REMOVED, NULL },
	{ "listen",					KEY_LIST, NULL },
	{ "log",					0, NULL },
	{ "query_log",				0, NULL },
	{ "read_timeout",			KEY_DEPRECATED, "network_timeout" },
	{ "network_timeout",		0, NULL },
	{ "client_timeout",			0, NULL },
	{ "reset_network_timeout_on_packet",	0, NULL },
	{ "max_children",			KEY_REMOVED, NULL },
	{ "pid_file",				0, NULL },
	{ "max_matches",			KEY_REMOVED, NULL },
	{ "seamless_rotate",		0, NULL },
	{ "preopen_indexes",		KEY_DEPRECATED, "preopen_tables" },
	{ "unlink_old",				0, NULL },
	{ "ondisk_dict_default",	KEY_REMOVED, NULL },
	{ "attr_flush_period",		0, NULL },
	{ "max_packet_size",		0, NULL },
	{ "mva_updates_pool",		KEY_REMOVED, NULL },
	{ "max_filters",			0, NULL },
	{ "max_filter_values",		0, NULL },
	{ "max_open_files",			0, NULL },
	{ "listen_backlog",			0, NULL },
	{ "listen_tfo",				0, NULL },
	{ "read_buffer",			KEY_DEPRECATED, "read_buffer_docs or read_buffer_hits" },
	{ "read_buffer_docs",		0, NULL },
	{ "read_buffer_hits",		0, NULL },
	{ "read_buffer_columnar",	0, NULL },
	{ "read_unhinted",			0, NULL },
	{ "max_batch_queries",		0, NULL },
	{ "subtree_docs_cache",		0, NULL },
	{ "subtree_hits_cache",		0, NULL },
	{ "workers",				KEY_DEPRECATED, "default value" },
	{ "prefork",				KEY_HIDDEN, NULL },
	{ "dist_threads",			KEY_DEPRECATED, "max_threads_per_query" },
	{ "max_threads_per_query",	0, NULL },
	{ "binlog_flush",			0, NULL },
	{ "binlog_path",			0, NULL },
	{ "binlog_max_log_size",	0, NULL },
	{ "binlog_filename_digits",	0, NULL },
	{ "binlog_common",			0, NULL },
	{ "thread_stack",			0, NULL },
	{ "expansion_limit",		0, NULL },
	{ "rt_flush_period",		0, NULL },
	{ "query_log_format",		0, NULL },
	{ "mysql_version_string",	0, NULL },
	{ "plugin_dir",				KEY_DEPRECATED, "plugin_dir in common{..} section" },
	{ "collation_server",		0, NULL },
	{ "collation_libc_locale",	0, NULL },
	{ "watchdog",				0, NULL },
	{ "prefork_rotation_throttle", KEY_REMOVED, NULL },
	{ "snippets_file_prefix",	0, NULL },
	{ "sphinxql_state",			0, NULL },
	{ "rt_merge_iops",			0, NULL },
	{ "rt_merge_maxiosize",		0, NULL },
	{ "ha_ping_interval",		0, NULL },
	{ "ha_period_karma",		0, NULL },
	{ "predicted_time_costs",	0, NULL },
	{ "persistent_connections_limit",	0, NULL },
	{ "ondisk_attrs_default",	KEY_REMOVED, NULL },
	{ "shutdown_timeout",		0, NULL },
	{ "query_log_min_msec",		0, NULL },
	{ "agent_connect_timeout",	0, NULL },
	{ "agent_query_timeout",	0, NULL },
	{ "agent_retry_delay",		0, NULL },
	{ "agent_retry_count",		0, NULL },
	{ "net_wait_tm",			0, NULL },
	{ "net_throttle_action",	0, NULL },
	{ "net_throttle_accept",	0, NULL },
	{ "net_send_job",			0, NULL },
	{ "net_workers",			0, NULL },
	{ "queue_max_length",		KEY_REMOVED, NULL },
	{ "qcache_ttl_sec",			0, NULL },
	{ "qcache_max_bytes",		0, NULL },
	{ "qcache_thresh_msec",		0, NULL },
	{ "sphinxql_timeout",		0, NULL },
	{ "hostname_lookup",		0, NULL },
	{ "grouping_in_utc",		KEY_DEPRECATED, "timezone" },
	{ "query_log_mode",			0, NULL },
	{ "prefer_rotate",			KEY_DEPRECATED, "seamless_rotate" },
	{ "shutdown_token",			0, NULL },
	{ "timezone",				0, NULL },
	{ "data_dir",				0, NULL },
	{ "node_address",			0, NULL },
	{ "server_id",				0, NULL },
	{ "access_plain_attrs",		0, nullptr },
	{ "access_blob_attrs",		0, nullptr },
	{ "access_doclists",		0, nullptr },
	{ "access_hitlists",		0, nullptr },
	{ "access_dict",			0, nullptr },
	{ "docstore_cache_size",	0, nullptr },
	{ "skiplist_cache_size",	0, nullptr },
	{ "ssl_cert",				0, nullptr },
	{ "ssl_key",				0, nullptr },
	{ "ssl_ca",					0, nullptr },
	{ "max_connections",		0, nullptr },
	{ "threads",				0, nullptr },
	{ "jobs_queue_size",		0, nullptr },
	{ "not_terms_only_allowed",	0, nullptr },
	{ "query_log_commands",		0, nullptr },
	{ "auto_optimize",			0, nullptr },
	{ "pseudo_sharding",		0, nullptr },
	{ "optimize_cutoff",		0, nullptr },
	{ "secondary_indexes",		0, nullptr },
	{ "accurate_aggregation",	0, nullptr },
	{ "distinct_precision_threshold",	0, nullptr },
	{ "preopen_tables",			0, nullptr },
	{ "buddy_path",				0, nullptr },
	{ "telemetry",				0, nullptr },
	{ "auto_schema",			0, nullptr },
	{ "engine",					0, nullptr },
	{ "join_cache_size",		0, nullptr },
	{ "replication_connect_timeout",	0, NULL },
	{ "replication_query_timeout",		0, NULL },
	{ "replication_retry_delay",		0, NULL },
	{ "replication_retry_count",		0, NULL },
	{ "expansion_merge_threshold_docs",	0, NULL },
	{ "expansion_merge_threshold_hits",	0, NULL },
	{ "merge_buffer_attributes",	0, NULL },
	{ "merge_buffer_columnar",	0, NULL },
	{ "merge_buffer_storage",	0, NULL },
	{ "merge_buffer_fulltext",	0, NULL },
	{ "merge_buffer_dict",		0, NULL },
	{ "merge_si_memlimit",		0, NULL },
	{ NULL,						0, NULL }
};
/// allowed keys for common section
/// (NULL-key row terminates the table; consulted by CSphConfigParser::ValidateKey)
static KeyDesc_t g_dKeysCommon[] =
{
	{ "lemmatizer_base",		0, NULL },
	{ "on_json_attr_error",		0, NULL },
	{ "json_autoconv_numbers",	0, NULL },
	{ "json_autoconv_keynames",	0, NULL },
	{ "rlp_root",				KEY_REMOVED, NULL },
	{ "rlp_environment",		KEY_REMOVED, NULL },
	{ "icu_data_dir",			KEY_REMOVED, NULL },
	{ "rlp_max_batch_size",		KEY_REMOVED, NULL },
	{ "rlp_max_batch_docs",		KEY_REMOVED, NULL },
	{ "plugin_dir",				0, NULL },
	{ "progressive_merge",		0, NULL },
	{ NULL,						0, NULL }
};
/// describes one top-level config section kind ("source", "index", etc.)
struct KeySection_t
{
	const char *		m_szKey;		///< key name
	KeyDesc_t *			m_pSection; ///< section to refer (table of keys allowed inside this section)
	bool				m_bNamed; ///< true if section is named. false if plain
	const char *		m_sAlias;		///< key name alias (e.g. "table" for "index"); may be nullptr
};
/// all recognized top-level config sections; NULL-key row terminates the table
/// (GetSection() returns the terminator row itself when a name is unknown)
static KeySection_t g_dConfigSections[] =
{
	{ "source",		g_dKeysSource,	true, nullptr },
	{ "index",		g_dKeysIndex,	true, "table" },
	{ "indexer",	g_dKeysIndexer,	false, nullptr },
	{ "searchd",	g_dKeysSearchd,	false, nullptr },
	{ "common",		g_dKeysCommon,	false, nullptr },
	{ NULL,			NULL,			false, nullptr }
};
//////////////////////////////////////////////////////////////////////////
/// simple config file
/// parses a whole config buffer (already fetched, possibly produced by a shebang script)
/// into a CSphConfig hash-of-hashes; reports the first error via TlsMsg
class CSphConfigParser
{
public:
	CSphConfigParser ( const VecTraits_T<char>& dData, CSphString sFileName ) noexcept;
	// run the state-machine parser over the buffer; returns false and sets TlsMsg error on failure
	bool			Parse();
	const CSphConfig& GetConfig() const noexcept { return m_tConf; }

private:
	VecTraits_T<char>	m_dData;		// raw config text (not owned)
	CSphString		m_sFileName;	// used only for error/warning messages
	CSphConfig		m_tConf;		// parse result
	int				m_iLine = -1;	// current 1-based line, for diagnostics
	CSphString		m_sSectionType;	// current section type ("searchd", "index", ...)
	CSphString		m_sSectionName;	// current section name (same as type for plain sections)
	int				m_iWarnings = 0;	// emitted warnings counter, capped by WARNS_THRESH
	static constexpr int WARNS_THRESH = 5;

private:
	bool			AddSection ( const char * szType, const char * szSection );
	void			AddKey ( const char * szKey, char * szValue );
	bool			ValidateKey ( const char * szKey );
};
/// look up a section descriptor by its name or alias (case-insensitive).
/// always returns a valid row: on an unknown name this is the NULL-key
/// terminator of g_dConfigSections, so callers must check m_szKey.
static const KeySection_t * GetSection ( const char * szKey )
{
	assert ( szKey );
	for ( const KeySection_t * pCur = g_dConfigSections; ; ++pCur )
	{
		if ( !pCur->m_szKey )
			return pCur; // reached the sentinel - not found
		if ( strcasecmp ( szKey, pCur->m_szKey )==0 )
			return pCur;
		if ( pCur->m_sAlias && strcasecmp ( szKey, pCur->m_sAlias )==0 )
			return pCur;
	}
}
/// true for a known, un-named section descriptor (e.g. searchd, indexer, common)
static bool IsPlainSection ( const KeySection_t * pSection )
{
	assert ( pSection );
	if ( !pSection->m_szKey )
		return false; // sentinel row - unknown section
	return !pSection->m_bNamed;
}
/// true for a known, named section descriptor (e.g. source, index)
static bool IsNamedSection ( const KeySection_t * pSection )
{
	assert ( pSection );
	if ( !pSection->m_szKey )
		return false; // sentinel row - unknown section
	return pSection->m_bNamed;
}
/// open a new section of the given type/name and make it current.
/// fails (via TlsMsg) on a duplicate section within the same type.
bool CSphConfigParser::AddSection ( const char * szType, const char * szSection )
{
	m_sSectionType = szType;
	m_sSectionName = szSection;

	// lazily create the per-type hash on first use
	if ( !m_tConf.Exists ( m_sSectionType ) )
		m_tConf.Add ( CSphConfigType(), m_sSectionType ); // FIXME! be paranoid, verify that it returned true

	auto & hSectionsOfType = m_tConf[m_sSectionType];
	if ( hSectionsOfType.Exists ( m_sSectionName ) )
		return TlsMsg::Err ( "section '%s' (type='%s') already exists", m_sSectionName.cstr(), m_sSectionType.cstr() );

	hSectionsOfType.Add ( CSphConfigSection(), m_sSectionName ); // FIXME! be paranoid, verify that it returned true
	return true;
}
/// store one key=value pair into the current section (value is trimmed in place)
void CSphConfigParser::AddKey ( const char * szKey, char * szValue )
{
	assert ( m_tConf.Exists ( m_sSectionType ) );
	assert ( m_tConf[m_sSectionType].Exists ( m_sSectionName ) );

	char * szTrimmed = trim ( szValue );
	m_tConf[m_sSectionType][m_sSectionName].AddEntry ( szKey, szTrimmed );
}
/// check a key against the allowed-keys table of the current section type.
/// returns false (with TlsMsg error) for unknown sections/keys; emits stdout
/// warnings (capped at WARNS_THRESH) for deprecated, removed and
/// silently-overridden non-list keys, but still accepts them.
bool CSphConfigParser::ValidateKey ( const char * szKey )
{
	// get proper descriptor table
	// OPTIMIZE! move lookup to AddSection
	const KeySection_t * pSection = GetSection ( m_sSectionType.cstr() );
	const KeyDesc_t * pDesc = nullptr;
	if ( pSection->m_szKey )
		pDesc = pSection->m_pSection;
	if ( !pDesc )
		return TlsMsg::Err( "unknown section type '%s'", m_sSectionType.cstr() );

	// check if the key is known (linear scan to the NULL-key terminator)
	while ( pDesc->m_sKey && strcasecmp ( pDesc->m_sKey, szKey )!=0 )
		pDesc++;
	if ( !pDesc->m_sKey )
		return TlsMsg::Err( "unknown key name '%s'", szKey );

	// warn about deprecate keys
	if ( pDesc->m_iFlags & KEY_DEPRECATED )
		if ( ++m_iWarnings<=WARNS_THRESH )
			fprintf ( stdout, "WARNING: key '%s' is deprecated in %s line %d; use '%s' instead.\n", szKey, m_sFileName.cstr(), m_iLine, pDesc->m_sExtra );

	// warn about list/non-list keys
	// (re-assigning a non-list key overrides the old value; m_bTag marks values inherited from a parent section, which are expected to be overridden)
	if (!( pDesc->m_iFlags & KEY_LIST ))
	{
		CSphConfigSection & tSec = m_tConf[m_sSectionType][m_sSectionName];
		if ( tSec( szKey ) && !tSec[szKey].m_bTag )
			if ( ++m_iWarnings<=WARNS_THRESH )
				fprintf ( stdout, "WARNING: key '%s' is not multi-value; value in %s line %d will be ignored.\n", szKey, m_sFileName.cstr(), m_iLine );
	}

	// removed keys are accepted but ignored; warn so the user cleans them up
	if ( pDesc->m_iFlags & KEY_REMOVED )
		if ( ++m_iWarnings<=WARNS_THRESH )
			fprintf ( stdout, "WARNING: key '%s' was permanently removed from configuration. Refer to documentation for details.\n", szKey );

	return true;
}
#if !_WIN32
// on POSIX platforms the config file may start with a shebang and be executable
constexpr bool HasSheBang()
{
	return true;
}

/// run the interpreter named by the shebang line, feeding it the config file,
/// and capture its stdout into dResult (that output becomes the config text).
/// pExecLine points just past "#!"; it is split in place into program + one
/// optional argument, and the original config filename is appended as the
/// last argument. returns false (with TlsMsg error) on any failure.
static bool TryToExec ( char * pExecLine, const char * szFilename, CSphVector<char> & dResult )
{
	using namespace TlsMsg;
	ResetErr(); // clean any inherited msgs
	const int BUFFER_SIZE = 65536;
	int dPipe[2] = { -1, -1 };

	if ( pipe ( dPipe ) )
		return Err ( "pipe() failed (error=%s)", strerrorm(errno) );

	pExecLine = trim ( pExecLine );

	int iRead = dPipe[0];
	int iWrite = dPipe[1];

	int iChild = fork();

	if ( iChild==0 )
	{
		// child: redirect stdout into the pipe and exec the interpreter
		close ( iRead );
		close ( STDOUT_FILENO );
		dup2 ( iWrite, STDOUT_FILENO );

		searchd::CleanAfterFork ();

		// split "prog arg" on the first whitespace (in place)
		LazyVector_T<const char*> dArgv;
		dArgv.Add ( pExecLine ); // 0-th arg - prog itself
		for ( char* pPtr = pExecLine; *pPtr; ++pPtr )
		{
			if ( sphIsSpace ( *pPtr ) )
			{
				*pPtr = '\0';
				dArgv.Add ( trim ( pPtr+1 ) ); // 1-st arg (if any)
				break;
			}
		}
		dArgv.Add ( szFilename ); // last arg (original file)
		dArgv.Add ( nullptr ); // null terminator, mandatory

		execv ( pExecLine, (char**)dArgv.begin() );
		exit ( 1 ); // reached only if execv failed
	} else if ( iChild==-1 )
		return Err ( "fork failed: [%d] %s", errno, strerrorm(errno) );

	// parent: drain the pipe until EOF
	close ( iWrite );

	dResult.Reset();
	while (true)
	{
		dResult.ReserveGap ( BUFFER_SIZE );
		auto pBuf = (void *) dResult.End();
		auto iChunk = (int) read ( iRead, pBuf, BUFFER_SIZE );
		if ( iChunk>0 )
			dResult.AddN ( iChunk );
		if ( !iChunk ) // eof
			break;
		if ( iChunk==-1 && errno!=EINTR ) // we can get SIGCHLD just before eof, other is fail
		{
			Err ( "pipe read error: [%d] %s", errno, strerrorm (errno));
			break;
		}
	}

	close ( iRead );

	// reap the child, tolerating EINTR and an already-reaped child (ECHILD)
	int iStatus, iResult;
	do
	{
		// can be interrupted by pretty much anything (e.g. SIGCHLD from other searchd children)
		iResult = waitpid ( iChild, &iStatus, 0 );

		// they say this can happen if child exited and SIGCHLD was ignored
		// a cleaner one would be to temporary handle it here, but can we be bothered
		if ( iResult==-1 && errno==ECHILD )
		{
			iResult = iChild;
			iStatus = 0;
		}

		if ( iResult==-1 && errno!=EINTR )
			return Err ( "waitpid() failed: [%d] %s", errno, strerrorm(errno) );
	}
	while ( iResult!=iChild );

	if ( WIFEXITED ( iStatus ) && WEXITSTATUS ( iStatus ) )
		// FIXME? read stderr and log that too
		return Err ( "error executing '%s' status = %d", pExecLine, WEXITSTATUS ( iStatus ) );

	if ( WIFSIGNALED ( iStatus ) )
		return Err ( "error executing '%s', killed by signal %d", pExecLine, WTERMSIG ( iStatus ) );

	return !HasErr();
}
#else
// Windows: no shebang support; stub succeeds without producing any output
template<typename ITER>
static bool TryToExec ( ITER, const char *, CSphVector<char> & )
{
	return true;
}

constexpr bool HasSheBang()
{
	return false;
}
#endif
/// read the config file (executing it via its shebang interpreter on POSIX, if
/// it has one) and report whether its content differs from the previous call.
/// returns { bChanged, dContent }; a missing/unreadable/empty file or a failed
/// shebang execution yields { true, empty }.
/// NOTE(review): the function-local statics track exactly one config file and
/// are not thread-safe - confirm callers serialize access to a single path.
std::pair<bool, CSphVector<char>> FetchAndCheckIfChanged ( const CSphString& sFilename )
{
	// state from the previous invocation, used for change detection
	static DWORD uStoredCRC32 = 0;
	static struct_stat tStoredStat;

	CSphVector<char> dContent;
	constexpr auto BUF_SIZE = 8192;
	std::array<char, BUF_SIZE> sBuf;
	auto* fp = fopen ( sFilename.scstr(), "rb" );
	AT_SCOPE_EXIT ( [fp] { if ( fp ) fclose ( fp ); } );
	if ( !fp )
		return { true, dContent };

	struct_stat tStat = { 0 };
	if ( fstat ( fileno ( fp ), &tStat ) < 0 )
		memset ( &tStat, 0, sizeof ( tStat ) );

	bool bGotLine = !!fgets ( sBuf.data(), sBuf.size(), fp );
	if ( !bGotLine )
		return { true, dContent };

	// shebang on the first line? execute the file and capture its stdout as the config
	if constexpr ( HasSheBang() )
	{
		auto pSheBang = std::find_if_not ( begin ( sBuf ), end ( sBuf ), isspace );
		if ( pSheBang + 2 < sBuf.end() && pSheBang[0] == '#' && pSheBang[1] == '!' )
		{
			sBuf.back() = '\0'; // just safety
			if ( !TryToExec ( pSheBang + 2, sFilename.cstr(), dContent ) )
			{
				dContent.Reset();
				return { true, dContent };
			}
		}
	}

	// plain config file - slurp it whole (first line is already in sBuf)
	if ( dContent.IsEmpty() )
	{
		while ( bGotLine ) {
			auto iLen = (int)strlen ( sBuf.data() );
			dContent.Append ( { sBuf.data(), iLen } );
			bGotLine = !!fgets ( sBuf.data(), sBuf.size(), fp );
		}
	}

	// compare content CRC and file metadata against the previously stored state.
	// (the CRC is computed once here; the old code also computed it redundantly
	// inside the shebang branch - that dead store has been removed)
	DWORD uCRC32 = sphCRC32 ( dContent.Begin(), dContent.GetLength() );

	if ( uStoredCRC32 == uCRC32
		&& tStat.st_size == tStoredStat.st_size
		&& tStat.st_mtime == tStoredStat.st_mtime
		&& tStat.st_ctime == tStoredStat.st_ctime )
		return { false, dContent };

	uStoredCRC32 = uCRC32;
	tStoredStat = tStat;
	return { true, dContent };
}
/// wrap an already-fetched config buffer; sFileName is kept only for diagnostics
CSphConfigParser::CSphConfigParser ( const VecTraits_T<char>& dData, CSphString sFileName ) noexcept
	: m_dData { dData }
	, m_sFileName { std::move ( sFileName ) }
{
}
bool CSphConfigParser::Parse ()
{
using namespace TlsMsg;
ResetErr();
constexpr int L_TOKEN = 64;
// init parser
m_iLine = 0;
m_iWarnings = 0;
const char* p = m_dData.begin();
const char* pDataEnd = m_dData.end();
const char* pLineBegin = p;
const char* pLineEnd = p;
std::array<char,L_TOKEN> sToken;
DWORD uToken = 0;
int iCh = -1;
enum class States_e { S_TOP, S_SKIP2NL, S_TOK, S_TYPE, S_SEC, S_CHR, S_VALUE, S_SECNAME, S_SECBASE, S_KEY };
auto eState = States_e::S_TOP;
std::array<States_e,8> eStack;
DWORD uStack = 0;
int iValue = 0, iValueMax = 65535;
auto sValue = std::make_unique<char[]> ( iValueMax + 1 );
#define LOC_ERROR(...) { Err(__VA_ARGS__); break; }
auto LOC_PUSH = [&uStack, &eStack, &eState] ( States_e eNew ) { assert ( uStack<eStack.size() ); eStack[uStack++] = std::exchange(eState,eNew); };
auto LOC_POP = [&uStack, &eStack, &eState] { assert ( uStack > 0 ); eState = eStack[--uStack]; };
auto LOC_BACK = [&p] { --p; };
for ( ; p < pDataEnd; ++p )
{
// if this line is over, load next line
if ( p >= pLineEnd )
{
++m_iLine;
p = pLineBegin = std::exchange ( pLineEnd, std::find ( pLineEnd, pDataEnd, '\n' ) );
if ( pLineEnd < pDataEnd )
++pLineEnd;
}
switch ( eState )
{
// handle S_TOP state
case States_e::S_TOP:
{
if ( isspace(*p) ) continue;
if ( *p=='#' ) { LOC_PUSH ( States_e::S_SKIP2NL ); continue; }
if ( !sphIsAlpha(*p) ) LOC_ERROR ( "invalid token" );
uToken = 0;
LOC_PUSH ( States_e::S_TYPE );
LOC_PUSH ( States_e::S_TOK );
LOC_BACK();
continue;
}
// handle S_SKIP2NL state
case States_e::S_SKIP2NL:
{
LOC_POP ();
p = pLineEnd;
continue;
}
// handle S_TOK state
case States_e::S_TOK:
{
if ( !uToken && !sphIsAlpha(*p) )LOC_ERROR ( "internal error (non-alpha in S_TOK pos 0)" );
if ( uToken==sToken.size() ) LOC_ERROR ( "token too long" );
if ( !sphIsAlpha(*p) ) { LOC_POP (); sToken [ uToken ] = '\0'; uToken = 0; LOC_BACK(); continue; }
if ( !uToken ) { sToken[0] = '\0'; }
sToken [ uToken++ ] = *p; continue;
}
// handle S_TYPE state
case States_e::S_TYPE:
{
if ( isspace(*p) ) continue;
if ( *p=='#' ) { LOC_PUSH ( States_e::S_SKIP2NL ); continue; }
if ( !sToken[0] ) { LOC_ERROR ( "internal error (empty token in S_TYPE)" ); }
const KeySection_t * pSection = GetSection ( sToken.data() );
if ( IsPlainSection ( pSection ) )
{
if ( !AddSection ( sToken.data(), sToken.data() ) )
break;
sToken[0] = '\0';
LOC_POP();
LOC_PUSH ( States_e::S_SEC );
LOC_PUSH ( States_e::S_CHR );
iCh = '{';
LOC_BACK();
continue;
}
if ( IsNamedSection ( pSection ) )
{
m_sSectionType = pSection->m_szKey;
sToken[0] = '\0';
LOC_POP ();
LOC_PUSH ( States_e::S_SECNAME );
LOC_BACK();
continue;
}
LOC_ERROR ( "invalid section type '%s'", sToken );
}
// handle S_CHR state
case States_e::S_CHR:
{
if ( isspace(*p) ) continue;
if ( *p=='#' ) { LOC_PUSH ( States_e::S_SKIP2NL ); continue; }
if ( *p!=iCh ) LOC_ERROR ( "expected '%c', got '%c'", iCh, *p );
LOC_POP (); continue;
}
// handle S_SEC state
case States_e::S_SEC:
{
if ( isspace(*p) ) continue;
if ( *p=='#' ) { LOC_PUSH ( States_e::S_SKIP2NL ); continue; }
if ( *p=='}' ) { LOC_POP (); continue; }
if ( sphIsAlpha(*p) ) { LOC_PUSH ( States_e::S_KEY ); LOC_PUSH ( States_e::S_TOK ); LOC_BACK(); iValue = 0; sValue[0] = '\0'; continue; }
LOC_ERROR ( "section contents: expected token, got '%c'", *p );
}
// handle S_KEY state
case States_e::S_KEY:
{
// validate the key
if ( !ValidateKey ( sToken.data() ) )
break;
// an assignment operator and a value must follow
LOC_POP (); LOC_PUSH ( States_e::S_VALUE ); LOC_PUSH ( States_e::S_CHR ); iCh = '=';
LOC_BACK(); // because we did not work the char at all
continue;
}
// handle S_VALUE state
case States_e::S_VALUE:
{
if ( *p=='\n' ) { AddKey ( sToken.data(), sValue.get() ); iValue = 0; LOC_POP (); continue; }
if ( *p=='#' ) { AddKey ( sToken.data(), sValue.get() ); iValue = 0; LOC_POP (); LOC_PUSH ( States_e::S_SKIP2NL ); continue; }
if ( *p=='\\' )
{
// backslash at the line end: continuation operator; let the newline be unhandled
if ( p[1]=='\r' || p[1]=='\n' ) { LOC_PUSH ( States_e::S_SKIP2NL ); continue; }
// backslash before number sign: comment start char escaping; advance and pass it
if ( p[1]=='#' ) { p++; }
// otherwise: just a char, pass it
}
if ( iValue<iValueMax ) { sValue[iValue++] = *p; sValue[iValue] = '\0'; }
continue;
}
// handle S_SECNAME state
case States_e::S_SECNAME:
{
if ( isspace(*p) ) { continue; }
if ( !sToken[0]&&!sphIsAlpha(*p)) { LOC_ERROR ( "named section: expected name, got '%c'", *p ); }
if ( !sToken[0] ) { LOC_PUSH ( States_e::S_TOK ); LOC_BACK(); continue; }
if ( !AddSection ( m_sSectionType.cstr(), sToken.data() ) ) break;
sToken[0] = '\0';
if ( *p==':' ) { eState = States_e::S_SECBASE; continue; }
if ( *p=='{' ) { eState = States_e::S_SEC; continue; }
LOC_ERROR ( "named section: expected ':' or '{', got '%c'", *p );
}
// handle S_SECBASE state
case States_e::S_SECBASE:
{
if ( isspace(*p) ) { continue; }
if ( !sToken[0]&&!sphIsAlpha(*p)) { LOC_ERROR ( "named section: expected parent name, got '%c'", *p ); }
if ( !sToken[0] ) { LOC_PUSH ( States_e::S_TOK ); LOC_BACK(); continue; }
// copy the section
assert ( m_tConf.Exists ( m_sSectionType ) );
if ( !m_tConf [ m_sSectionType ].Exists ( sToken.data() ) )
LOC_ERROR ( "inherited section '%s': parent doesn't exist (parent name='%s', type='%s')", m_sSectionName.cstr(), sToken, m_sSectionType.cstr() );
CSphConfigSection & tDest = m_tConf [ m_sSectionType ][ m_sSectionName ];
tDest = m_tConf [ m_sSectionType ][ sToken.data() ];
// mark all values in the target section as "to be overridden"
for ( auto& tVal : tDest )
tVal.second.m_bTag = true;
LOC_BACK();
eState = States_e::S_SEC;
LOC_PUSH ( States_e::S_CHR );
iCh = '{';
continue;
}
default:
LOC_ERROR ( "internal error (unhandled state %d)", eState );
break;
}
// it should be error, as we never fall down to here with valid config
assert ( HasErr() );
break; // for ( ...
}
#undef LOC_ERROR
if ( m_iWarnings>WARNS_THRESH )
fprintf ( stdout, "WARNING: %d more warnings skipped.\n", m_iWarnings-WARNS_THRESH );
if ( !HasErr() )
return true;
auto iCol = (int)( p - pLineBegin + 1 );
return Err ( "ERROR: %s in %s line %d col %d.\n", szError (), m_sFileName.cstr (), m_iLine, iCol );
}
/////////////////////////////////////////////////////////////////////////////
#if _WIN32
#pragma message( "Automatically linking with AdvAPI32.Lib" )
#pragma comment( lib, "AdvAPI32.Lib" )

/// Windows only: read the install path from the registry
/// (HKLM\SOFTWARE\WOW6432Node\Manticore Software LTD, value "manticore")
/// into g_sWinInstallPath; silently leaves it unchanged if the key is absent
void CheckWinInstall()
{
	HKEY hKey;
	LONG iRes = RegOpenKeyExW ( HKEY_LOCAL_MACHINE, L"SOFTWARE\\WOW6432Node\\Manticore Software LTD", 0, KEY_READ, &hKey );
	if ( iRes!=ERROR_SUCCESS )
		return;

	WCHAR szBuffer[512];
	DWORD uBufferSize = sizeof(szBuffer);
	iRes = RegQueryValueExW ( hKey, L"manticore", 0, NULL, (LPBYTE)szBuffer, &uBufferSize);
	if ( iRes!=ERROR_SUCCESS )
		return;

	// convert the UTF-16 registry value to UTF-8
	g_sWinInstallPath = std::wstring_convert<std::codecvt_utf8<wchar_t>, wchar_t>().to_bytes(szBuffer).c_str();
}

/// Windows only: install path previously fetched by CheckWinInstall (may be empty)
CSphString GetWinInstallDir()
{
	return g_sWinInstallPath;
}
#endif
/////////////////////////////////////////////////////////////////////////////
/// resolve the config file path: explicit hint wins; otherwise try
/// SYSCONFDIR/manticore.conf, then ./manticore.conf, then (Windows) the
/// registry install dir; dies via sphFatal if nothing readable is found
CSphString sphGetConfigFile ( const char * szHint )
{
	if ( szHint )
		return RealPath(szHint);

	// fallback to defaults if there was no explicit config specified
#ifdef SYSCONFDIR
	static const char* sConfigFile = SYSCONFDIR "/manticore.conf";
	if ( sphIsReadable ( sConfigFile ) )
		return RealPath(sConfigFile);
#endif

	static const char* sWorkingConfiFile = "./manticore.conf";
	if ( sphIsReadable ( sWorkingConfiFile ) )
		return RealPath(sWorkingConfiFile);

#if _WIN32
	if ( !GetWinInstallDir().IsEmpty() )
	{
		static CSphString sConf;
		sConf.SetSprintf ( "%s/etc/manticoresearch/manticore.conf", GetWinInstallDir().cstr() );
		if ( sphIsReadable ( sConf.cstr() ) )
			return RealPath(sConf);
	}
#endif

	sphFatal ( "no readable config file (looked in "
#ifdef SYSCONFDIR
		SYSCONFDIR "/manticore.conf, "
#endif
		"./manticore.conf)." );
	return nullptr; // unreachable; sphFatal does not return
}
/// parse a config buffer into *pConfig; returns false (TlsMsg holds the error) on failure
bool ParseConfig ( CSphConfig* pConfig, CSphString sFileName, const VecTraits_T<char>& dData )
{
	// run the parser over the supplied buffer
	CSphConfigParser tParser { dData, std::move ( sFileName ) };
	bool bParsed = tParser.Parse();
	if ( !bParsed )
		return false;

	assert ( pConfig );
	*pConfig = tParser.GetConfig();
	return true;
}
// whether the loaded config is required to contain at least one index/table section
enum class Indexes_e : bool { eNeed, eNotNeed };

/// resolve, fetch and parse the config; dies via sphDie on parse failure
/// (and, when eNeed, on a config with no "index" sections).
/// sActualPath receives the resolved config file path.
template<Indexes_e eNeed>
inline static CSphConfig LoadConfig ( const CSphString & sPath, bool bTraceToStdout, CSphString & sActualPath )
{
	// fallback to defaults if there was no explicit config specified
	sActualPath = sphGetConfigFile ( sPath.cstr() );

	if ( bTraceToStdout )
		fprintf ( stdout, "using config file '%s'...\n", sActualPath.cstr() );

	// load config
	auto [bChanged, dConfig] = FetchAndCheckIfChanged ( sActualPath );
	CSphConfig hConf;
	if ( !ParseConfig ( &hConf, sActualPath, dConfig ) )
		sphDie ( "failed to parse config file '%s': %s", sActualPath.cstr(), TlsMsg::szError() );

	if constexpr ( eNeed==Indexes_e::eNeed )
		if ( !hConf ( "index" ) )
			sphDie ( "no tables found in config file '%s'", sActualPath.cstr() );

	return hConf;
}
/// load config, requiring at least one table; reports the resolved path back
CSphConfig sphLoadConfig ( const CSphString & sPath, bool bTraceToStdout, CSphString & sActualPath )
{
	return LoadConfig<Indexes_e::eNeed> ( sPath, bTraceToStdout, sActualPath );
}

/// load config, requiring at least one table; resolved path is discarded
CSphConfig sphLoadConfig ( const CSphString & sPath, bool bTraceToStdout )
{
	CSphString sActualPath;
	return LoadConfig<Indexes_e::eNeed> ( sPath, bTraceToStdout, sActualPath );
}

/// load config without requiring any table sections (e.g. for tools)
CSphConfig sphLoadConfigWithoutIndexes ( const CSphString & sPath, bool bTraceToStdout )
{
	CSphString sActualPath;
	return LoadConfig<Indexes_e::eNotNeed> ( sPath, bTraceToStdout, sActualPath );
}
//////////////////////////////////////////////////////////////////////////
ESphLogLevel g_eLogLevel = SPH_LOG_INFO;		// current log level, can be changed on the fly

/// default logger: prints a level prefix + the formatted message to stdout,
/// skipping anything above the current g_eLogLevel
static void StdoutLogger ( ESphLogLevel eLevel, const char * sFmt, va_list ap )
{
	if ( eLevel>g_eLogLevel )
		return;

	switch ( eLevel )
	{
		case SPH_LOG_FATAL: fprintf ( stdout, "FATAL: " ); break;
		case SPH_LOG_WARNING: fprintf ( stdout, "WARNING: " ); break;
		// NOTE(review): INFO-level messages are prefixed "WARNING: " here, not "INFO: " -
		// looks like a copy-paste; confirm whether anything depends on this exact output
		case SPH_LOG_INFO: fprintf ( stdout, "WARNING: " ); break;
		case SPH_LOG_DEBUG:
		case SPH_LOG_VERBOSE_DEBUG:
		case SPH_LOG_VERY_VERBOSE_DEBUG: fprintf ( stdout, "DEBUG: " ); break;
		case SPH_LOG_RPL_DEBUG: fprintf ( stdout, "RPL: " ); break;
	}

	vfprintf ( stdout, sFmt, ap );
	fprintf ( stdout, "\n" );
}
// per-level tables of message prefixes to suppress (nullptr = free slot)
static const int MAX_PREFIXES = 10;
static const char * g_dDisabledLevelLogs[SPH_LOG_MAX+1][MAX_PREFIXES] = {{0}};

/// register a message prefix to be suppressed at the given log level.
/// the pointer itself is stored - sNewPrefix must outlive the suppression.
void sphLogSupress ( const char * sNewPrefix, ESphLogLevel eLevel )
{
	for ( const char * &sPrefix : g_dDisabledLevelLogs[eLevel] )
		if ( !sPrefix )
		{
			// first free slot - store and done
			sPrefix = sNewPrefix;
			return;
		} else if ( !strcmp ( sPrefix, sNewPrefix ) )
			return; // already registered

	// no space, just overwrite the last one
	g_dDisabledLevelLogs[eLevel][MAX_PREFIXES-1] = sNewPrefix;
}
/// unregister a previously suppressed message prefix at the given log level.
/// fix vs. previous revision: the compaction loop kept only the entries EQUAL
/// to sDelPrefix and dropped everything else, i.e. exactly the opposite of
/// removal; it now keeps the entries that do NOT match and nulls the tail.
void sphLogSupressRemove ( const char * sDelPrefix, ESphLogLevel eLevel )
{
	const char ** ppSource = g_dDisabledLevelLogs[eLevel];
	int i = 0;
	// compact the slot array in place, skipping the prefix being removed
	for ( const char *&sPrefix : g_dDisabledLevelLogs[eLevel] )
		if ( sPrefix && strcmp ( sDelPrefix, sPrefix )!=0 )
			ppSource[i++] = sPrefix;
	// clear the freed tail slots
	for ( ;i<MAX_PREFIXES;++i )
		g_dDisabledLevelLogs[eLevel][i] = nullptr;
}
/// accessor for the process-wide logger callback (defaults to StdoutLogger);
/// returned by reference so callers can replace it
volatile SphLogger_fn& g_pLogger()
{
	static SphLogger_fn pLogger = &StdoutLogger;
	return pLogger;
}

/// dispatch one message to the current logger, honoring per-level
/// prefix suppression registered via sphLogSupress
inline void Log ( ESphLogLevel eLevel, const char * sFmt, va_list ap )
{
	if ( !g_pLogger() ) return;
	// suppression slots are packed from the front; a null slot means 'no more entries'
	for ( const char * sPrefix : g_dDisabledLevelLogs[eLevel] )
		if ( sPrefix && !strncmp ( sPrefix, sFmt, strlen ( sPrefix ) ) )
			return;
		else if ( !sPrefix )
			break;
	g_pLogger() ( eLevel, sFmt, ap );
}
/// va_list front-end for the logger
void sphLogVa ( const char * sFmt, va_list ap, ESphLogLevel eLevel )
{
	Log ( eLevel, sFmt, ap );
}

/// printf-style logging at an explicit level
void sphLogf ( ESphLogLevel eLevel, const char* sFmt, ... )
{
	va_list ap;
	va_start ( ap, sFmt );
	Log ( eLevel, sFmt, ap );
	va_end ( ap );
}

// the *_impl variadic wrappers below all follow the same pattern:
// forward their printf-style arguments to sphLogVa at a fixed level

/// warning level (relies on sphLogVa's default level parameter)
void sphWarning_impl ( const char * sFmt, ... )
{
	va_list ap;
	va_start ( ap, sFmt );
	sphLogVa ( sFmt, ap );
	va_end ( ap );
}

/// info level
void sphInfo_impl ( const char * sFmt, ... )
{
	va_list ap;
	va_start ( ap, sFmt );
	sphLogVa ( sFmt, ap, SPH_LOG_INFO );
	va_end ( ap );
}

/// fatal level
void sphLogFatal ( const char * sFmt, ... )
{
	va_list ap;
	va_start ( ap, sFmt );
	sphLogVa ( sFmt, ap, SPH_LOG_FATAL );
	va_end ( ap );
}

/// debug level
void sphLogDebug_impl ( const char * sFmt, ... )
{
	va_list ap;
	va_start ( ap, sFmt );
	sphLogVa ( sFmt, ap, SPH_LOG_DEBUG );
	va_end ( ap );
}

/// verbose debug level
void sphLogDebugv_impl ( const char * sFmt, ... )
{
	va_list ap;
	va_start ( ap, sFmt );
	sphLogVa ( sFmt, ap, SPH_LOG_VERBOSE_DEBUG );
	va_end ( ap );
}

/// very verbose debug level
void sphLogDebugvv_impl ( const char * sFmt, ... )
{
	va_list ap;
	va_start ( ap, sFmt );
	sphLogVa ( sFmt, ap, SPH_LOG_VERY_VERBOSE_DEBUG );
	va_end ( ap );
}

/// replication debug level
void sphLogDebugRpl_impl ( const char * sFmt, ... )
{
	va_list ap;
	va_start ( ap, sFmt );
	sphLogVa ( sFmt, ap, SPH_LOG_RPL_DEBUG );
	va_end ( ap );
}
namespace // use string builder with custom formatters
{
	/// pre-format the message with StringBuilder_c's extended %-formatters,
	/// then hand the finished string to the regular logger
	void CustomLogVa ( const char* sFmt, va_list ap, ESphLogLevel eLevel )
	{
		StringBuilder_c sMyLine;
		sMyLine.vSprintf ( sFmt, ap );
		sphLogf ( eLevel, "%s", sMyLine.cstr() );
	}
} // namespace

/// warning-level logging via the custom string-builder formatters
void CustomLog::Warning_impl ( const char * sFmt, ... )
{
	va_list ap;
	va_start ( ap, sFmt );
	CustomLogVa ( sFmt, ap, SPH_LOG_WARNING );
	va_end ( ap );
}

/// info-level logging via the custom string-builder formatters
void CustomLog::Info_impl ( const char * sFmt, ... )
{
	va_list ap;
	va_start ( ap, sFmt );
	CustomLogVa ( sFmt, ap, SPH_LOG_INFO );
	va_end ( ap );
}
/// logging with a "[elapsed-since-TimeStart]" prefix, for tracing startup & co.
namespace TimePrefixed
{
	static int64_t g_uTimePrefix = 0; // reference point, microseconds

	/// set the reference point to 'now'
	void TimeStart ()
	{
		g_uTimePrefix = sphMicroTimer ();
	}

	/// microseconds elapsed since TimeStart()
	int64_t TimeStamp()
	{
		return sphMicroTimer() - g_uTimePrefix;
	}

	/// build "<prefix>[<elapsed>] <fmt>" as a new format string, then apply
	/// the caller's args to it and emit via sphLogf
	static void TimedLogVa ( const char* sPrefix, const char* sFmt, va_list ap, ESphLogLevel eLevel )
	{
		if ( eLevel>g_eLogLevel )
			return;
		StringBuilder_c sMyLine;
		sMyLine.Sprintf ( "%s[%t] %s", sPrefix, sphMicroTimer () - g_uTimePrefix, sFmt);
		CSphString sFormat;
		sMyLine.MoveTo (sFormat);
		sMyLine.vSprintf ( sFormat.cstr(), ap );
		sphLogf ( eLevel, "%s", sMyLine.cstr () );
	}

	/// verbose-debug-level time-prefixed logging
	void LogDebugv ( const char* sPrefix, const char* sFmt, ... )
	{
		va_list ap;
		va_start ( ap, sFmt );
		TimedLogVa ( sPrefix, sFmt, ap, SPH_LOG_VERBOSE_DEBUG );
		va_end ( ap );
	}
}
//////////////////////////////////////////////////////////////////////////
// CRASH REPORTING
//////////////////////////////////////////////////////////////////////////
/// constants and rounding tables for human-readable timespan formatting (TMtoA_T)
namespace tmtoa {
	using us_t = int64_t; // useconds

	// unit values and their halves (U5 = 0.5us, U5X = 50us, ... used as rounding offsets)
	static const us_t US = 1;
	static const us_t U5 = 5;
	static const us_t U5X = 50 * US;
	static const us_t U5C = 500 * US;
	static const us_t MS = 1000 * US;
	static const us_t MS5 = 5 * MS;
	static const us_t M5X = 50 * MS;
	static const us_t M5C = 500 * MS;
	static const us_t S = 1000 * MS;
	static const us_t S3 = 3 * S;
	static const us_t S3X = 30 * S;
	static const us_t M = 60 * S;
	static const us_t M3 = 3 * M;
	static const us_t M3X = 30 * M;
	static const us_t H = 60 * M;
	static const us_t H5 = 72 * M; // 1.2 hour
	static const us_t H5X = 12 * H;
	static const us_t D = 24 * H;
	static const us_t D5 = 84 * H; // 3.5 days
	static const us_t W = 7 * D;

	// scale 0..6 = microseconds .. weeks
	static const int NUM_SCALES = 7;
	static const char* sSuffixes[NUM_SCALES] = { "us", "ms", "s", "m", "h", "d", "w" };
	static const int iSufLens[NUM_SCALES] = {2, 2, 1, 1, 1, 1, 1}; // len of names
	static const int64_t TMScales[NUM_SCALES] = { US, MS, S, M, H, D, W };

	// how many rest digits causes new scale to add instead of frac
	static const int iPrecAfter[NUM_SCALES] = { 0, 3, 3, 2, 2, 2, 1 };

	/// pick the half-unit rounding offset for a given scale and precision
	/// (0 means 'no rounding'; see the table in the comment below)
	static us_t calc_round ( int iScale, int iPrec )
	{
		// prec:   0    1    2    3    4    5    6    7    8    9    10   11   12  13
		// ---------------------------------------------------------------
		// usec    0
		// msec    U5C  U5X  U5   0
		// sec     M5C  M5X  MS5  U5C  U5X  U5   0
		// min     S3X  S3   M5C  M5X  MS5  U5C  U5X  U5   0
		// hour    M3X  M3   S3X  S3   M5C  M5X  MS5  U5C  U5X  U5   0
		// day     H5X  H5   M3X  M3   S3X  S3   M5C  M5X  MS5  U5C  U5X  U5   0
		// week    D5   H5X  H5   M3X  M3   S3X  S3   M5C  M5X  MS5  U5C  U5X  U5  0
		static const int dStartPos[NUM_SCALES] = {13, 10, 7, 5, 3, 1, 0};
		static const us_t dRound[] = { D5, H5X, H5, M3X, M3, S3X, S3, M5C, M5X, MS5, U5C, U5X, U5, 0 };
		assert (iScale>=0 && iScale<NUM_SCALES);
		assert ( iPrec>=0 && iPrec<14);
		auto iIdx = dStartPos[iScale] + iPrec;
		if ( iIdx>12 )
			return 0;
		return dRound[iIdx];
	}
}
// format timespan expressed in useconds.
template < typename PCHAR >
void TMtoA_T ( PCHAR* pOutput, int64_t nVal, int iPrec )
{
assert ( iPrec<nDividers );
auto& pBegin = *pOutput;
// correct sign, if necessary
if ( nVal<0 )
{
*Tail ( pBegin ) = '-';
++pBegin;
nVal = -nVal;
}
// find the range we deal with (from 6(week) to 0(usecs))
int iSpan;
for ( iSpan = 6; iSpan>0; --iSpan )
if ( nVal>=tmtoa::TMScales[iSpan] )
break;
// round the value
auto iRound = tmtoa::calc_round ( iSpan, iPrec );
if ( iRound )
{
nVal += iRound;
nVal -= nVal % ( iRound + iRound );
}
// after rounding range may change, recalc
for ( iSpan = 6; iSpan>0; --iSpan )
if ( nVal>=tmtoa::TMScales[iSpan] )
break;
while (true)
{
// multiplier and precision digits for range
auto iMul = tmtoa::TMScales[iSpan];
auto iPrecAfter = tmtoa::iPrecAfter[iSpan];
// solid part
::NtoA_T ( &pBegin, nVal / iMul );
// rest
nVal %= iMul;
// check if rest of precision is enough to print extra values
if ( iPrec>0 && iPrec<iPrecAfter )
{
if (iPrec==2) nVal /= (iMul/100);
else if ( iPrec==1) nVal /= (iMul/10);
if ( nVal ) // 0. Stop printing
{
*Tail ( pBegin ) = '.';
++pBegin;
::NtoA_T ( &pBegin, nVal, 10, 0, iPrec, '0' );
}
}
// print specifier
Grow ( pBegin, tmtoa::iSufLens[iSpan] );
memcpy ( Tail ( pBegin ), tmtoa::sSuffixes[iSpan], tmtoa::iSufLens[iSpan] );
pBegin += tmtoa::iSufLens[iSpan];
// all is done
if ( !iPrecAfter || iPrec<iPrecAfter )
return;
// the rest is 0. Stop
if (nVal==0)
return;
// print space before continue.
*Tail ( pBegin ) = ' ';
++pBegin;
// go to next range
iPrec -= iPrecAfter;
--iSpan;
}
}
// format timestamp expressed in useconds.
// renders the timestamp relative to 'now': "never" for 0, "<span> ago" for a
// past moment, "in <span>" for a future one, or "now" for this very instant.
template < typename PCHAR >
void TMStoA_T ( PCHAR* pOutput, int64_t nVal, int iPrec )
{
	auto& pBegin = *pOutput;
	if ( !nVal )
	{
		Grow( pBegin, 5 );
		memcpy( Tail( pBegin ), "never", 5 );
		pBegin += 5;
		return;
	}

	int64_t iTimespan = nVal - sphMicroTimer();
	if ( iTimespan<0 ) // past event
	{
		TMtoA_T ( pOutput, -iTimespan, iPrec );

		// print specifier
		Grow ( pBegin, 4 );
		memcpy ( Tail ( pBegin ), " ago", 4 );
		pBegin += 4;
	} else if (iTimespan>0) // future event
	{
		Grow ( pBegin, 3 );
		memcpy ( Tail ( pBegin ), "in ", 3 );
		pBegin += 3;
		TMtoA_T ( pOutput, iTimespan, iPrec );
	} else
	{
		Grow ( pBegin, 3 );
		memcpy ( Tail ( pBegin ), "now", 3 );
		pBegin += 3;
	}
}
namespace sph {
/// given a format-string position right after "%l", return how many chars of a
/// 64-bit integer conversion to skip: 2 for "ld"/"li"/"lu" (i.e. %lld/%lli/%llu),
/// 1 for a bare "d"/"i"/"u" (i.e. %ld/%li/%lu), 0 otherwise
static int SkipFmt64 ( const char * sFmt )
{
	const char cFirst = sFmt[0];

	// two-char tail: a second 'l' followed by a conversion char
	if ( cFirst=='l' )
	{
		const char cSecond = sFmt[1];
		if ( cSecond=='d' || cSecond=='i' || cSecond=='u' )
			return 2;
	}

	// one-char tail: the conversion char directly
	if ( cFirst=='d' || cFirst=='i' || cFirst=='u' )
		return 1;

	// not a recognized 64-bit conversion
	return 0;
}
// Lightweight printf-like formatter writing through PCHAR - either a plain
// char* cursor or a growable sink (Grow/Tail/operator+= overloads select
// the behavior). Supported conversions: %s %c %p %x %u %d, %i (skip the
// argument, print nothing), %l (int64, also consumes %lld/%ld style tails),
// %U (uint64), %D/%F (fixed-point int64/int32, precision = fractional
// digits), %t (timespan in usec), %T (timestamp in usec), %f (delegated to
// snprintf / PrintVarFloat) and %%. Fill/width apply to numeric
// conversions; precision clips %s; '-' right-pads %s.
template <typename PCHAR>
void vSprintf_T ( PCHAR * _pOutput, const char * sFmt, va_list ap )
{
	enum eStates { SNORMAL, SPERCENT, SHAVEFILL, SINWIDTH, SINPREC };
	eStates state = SNORMAL;
	size_t iPrec = 0;
	size_t iWidth = 0;
	char cFill = ' ';
	bool bHeadingSpace = true;	// cleared by '-': pad %s on the right instead of the left
	auto &pOutput = *_pOutput;
	char c;
	while ( ( c = *sFmt++ )!=0 )
	{
		// handle regular chars: bulk-copy everything up to the next '%'
		if ( state==SNORMAL )
		{
			auto sPercent = strchr (sFmt-1, '%');
			if ( !sPercent ) // no formatters, only plain chars
			{
				auto uLen = strlen (sFmt-1);
				Grow ( pOutput, (int) uLen );
				memcpy ( Tail ( pOutput ), sFmt-1, (int) uLen );
				pOutput += (int) uLen;
				sFmt+=uLen-1;
				continue;
			}
			auto uLen = sPercent - sFmt + 1;
			if ( uLen )
			{
				Grow ( pOutput, (int)uLen );
				memcpy ( Tail ( pOutput ), sFmt - 1, (int)uLen );
				pOutput += uLen;
				sFmt+=uLen;
			}
			// handle percent: reset per-conversion modifiers
			state = SPERCENT;
			iPrec = 0;
			iWidth = 0;
			cFill = ' ';
			continue;
		}
		// handle percent ("%%" emits a literal '%')
		if ( c=='%' && state!=SNORMAL )
		{
			state = SNORMAL;
			*Tail ( pOutput ) = c;
			++pOutput;
			continue;
		}
		// handle modifiers
		switch ( c )
		{
		case '0':
			// leading zero right after '%' selects zero-fill; otherwise it is a width/prec digit
			if ( state==SPERCENT )
			{
				cFill = '0';
				state = SHAVEFILL;
				break;
			}
			// [[clang::fallthrough]];
		case '1': case '2': case '3':
		case '4': case '5': case '6':
		case '7': case '8': case '9':
			if ( state==SPERCENT || state==SHAVEFILL )
			{
				state = SINWIDTH;
				iWidth = c - '0';
			} else if ( state==SINWIDTH )
				iWidth = iWidth * 10 + c - '0';
			else if ( state==SINPREC )
				iPrec = iPrec * 10 + c - '0';
			break;
		case '-':
			if ( state==SPERCENT )
				bHeadingSpace = false;
			else
				state = SNORMAL; // FIXME? means that bad/unhandled syntax with dash will be just ignored
			break;
		case '.':
			state = SINPREC;
			iPrec = 0;
			break;
		case 's': // string
			{
				const char * pValue = va_arg ( ap, const char * );
				if ( !pValue )
					pValue = "(null)";
				size_t iValue = strlen ( pValue );
				// precision clips the string; width pads it (dropped when the value is longer)
				if ( iPrec && iPrec<iValue )
					iValue = iPrec;
				if ( iWidth && iValue>iWidth )
					iWidth = 0;
				if ( iWidth )
					iWidth-=iValue;
				Grow ( pOutput, (int) iWidth );
				if ( iWidth && bHeadingSpace )
				{
					memset ( Tail ( pOutput ), ' ', (int) iWidth );
					pOutput += (int) iWidth;
				}
				Grow ( pOutput, (int) iValue );
				memcpy ( Tail ( pOutput ), pValue, iValue );
				pOutput += (int) iValue;
				if ( iWidth && !bHeadingSpace )
				{
					memset ( Tail ( pOutput ), ' ', (int) iWidth );
					pOutput += (int) iWidth;
				}
				state = SNORMAL;
				break;
			}
		case 'c': // char
			{
				auto cValue = (char) va_arg ( ap, int );
				Grow ( pOutput, 1 );
				*Tail ( pOutput ) = cValue;
				++pOutput;
				state = SNORMAL;
				break;
			}
		case 'p': // pointer, rendered as hex
			{
				void * pValue = va_arg ( ap, void * );
				auto uValue = uint64_t ( pValue );
				::NtoA_T ( &pOutput, uValue, 16, (int) iWidth, (int) iPrec, cFill );
				state = SNORMAL;
				break;
			}
		case 'x': // hex unsigned integer
		case 'u': // decimal unsigned
			{
				DWORD uValue = va_arg ( ap, DWORD );
				::NtoA_T ( &pOutput, uValue, ( c=='x' ) ? 16 : 10, (int) iWidth,(int) iPrec, cFill );
				state = SNORMAL;
				break;
			}
		case 'd': // decimal integer
			{
				int iValue = va_arg ( ap, int );
				::NtoA_T ( &pOutput, iValue, 10, (int) iWidth, (int) iPrec, cFill );
				state = SNORMAL;
				break;
			}
		case 'i': // ignore (skip) current integer. Output nothing.
			{
				int VARIABLE_IS_NOT_USED iValue = va_arg ( ap, int );
				state = SNORMAL;
				break;
			}
		case 'l': // decimal int64
			{
				int64_t iValue = va_arg ( ap, int64_t );
				::NtoA_T ( &pOutput, iValue, 10, (int) iWidth, (int) iPrec, cFill );
				state = SNORMAL;
				// consume the rest of a standard spec ("%lld", "%ld", etc.)
				sFmt += SkipFmt64 ( sFmt );
				break;
			}
		case 'U': // decimal uint64
			{
				uint64_t iValue = va_arg ( ap, uint64_t );
				::NtoA_T ( &pOutput, iValue, 10, (int) iWidth, (int) iPrec, cFill );
				state = SNORMAL;
				break;
			}
		case 'D': // fixed-point signed 64-bit
			{
				int64_t iValue = va_arg ( ap, int64_t );
				::IFtoA_T ( &pOutput, iValue, (int) iPrec );
				state = SNORMAL;
				break;
			}
		case 'F': // fixed-point signed 32-bit
			{
				int iValue = va_arg ( ap, int );
				::IFtoA_T ( &pOutput, iValue, (int) iPrec );
				state = SNORMAL;
				break;
			}
		case 't': // timespan given in int64 useconds
			{
				int64_t iValue = va_arg ( ap, int64_t );
				::TMtoA_T ( &pOutput, iValue, (int) iPrec );
				state = SNORMAL;
				break;
			}
		case 'T': // timestamp (related to now()) given in int64 useconds
			{
				int64_t iValue = va_arg ( ap, int64_t );
				::TMStoA_T ( &pOutput, iValue, (int) iPrec );
				state = SNORMAL;
				break;
			}
		case 'f': // float (fall-back to standard)
			{
				double fValue = va_arg ( ap, double );
				// ensure 32 is enough to take any float value.
				// NOTE(review): output of a non-plain %f spec is capped at
				// max(width,32)-1 chars by snprintf - presumably sufficient; confirm
				Grow ( pOutput, Max ( (int) iWidth, 32 ));
				// extract current format from source format line
				auto *pF = sFmt;
				while ( *--pF!='%' );
				if ( memcmp ( pF, "%f", 2 )!=0 )
				{
					// invoke standard sprintf
					char sFormat[32] = { 0 };
					memcpy ( sFormat, pF, sFmt - pF );
					pOutput += snprintf ( Tail ( pOutput ), Max ( (int)iWidth, 32 ) - 1, sFormat, fValue );
				} else
				{
					// plain %f - output arbitrary 6 or 8 digits
					pOutput += PrintVarFloat ( Tail ( pOutput ), Max ( (int)iWidth, 32 ) - 1, (float)fValue );
					assert (( sFmt - pF )==2 );
				}
				state = SNORMAL;
				break;
			}
		default:
			// unknown conversion char: emit it literally and resume normal copying
			state = SNORMAL;
			*Tail ( pOutput ) = c;
			++pOutput;
		}
	}
	// final zero (c is 0 here; the cursor is NOT advanced past it,
	// so returned lengths exclude the terminator)
	*Tail ( pOutput ) = c;
}
// format into a raw char buffer; returns the number of chars written,
// excluding the terminating zero (the buffer must be large enough)
int vSprintf ( char * pOutput, const char * sFmt, va_list ap )
{
	auto pBegin = pOutput;
	sph::vSprintf_T ( &pOutput, sFmt, ap );
	return int ( pOutput - pBegin );
}

// varargs convenience wrapper over vSprintf (char buffer flavor)
int Sprintf ( char * pOutput, const char * sFmt, ... )
{
	auto pBegin = pOutput;
	va_list ap;
	va_start ( ap, sFmt );
	sph::vSprintf_T ( &pOutput, sFmt, ap );
	va_end ( ap );
	return int ( pOutput - pBegin );
}

// format into a growing StringBuilder_c sink
void vSprintf ( StringBuilder_c &dOutput, const char * sFmt, va_list ap )
{
	sph::vSprintf_T ( &dOutput, sFmt, ap );
}

// varargs convenience wrapper over vSprintf (StringBuilder_c flavor)
void Sprintf ( StringBuilder_c& dOutput, const char * sFmt, ... )
{
	va_list ap;
	va_start ( ap, sFmt );
	vSprintf ( dOutput, sFmt, ap );
	va_end ( ap );
}
// Render a float with the default 6 fractional digits; if parsing the text
// back does not reproduce the exact value, re-render with 8 fractional
// digits. Returns the printed length.
int PrintVarFloat ( char* sBuffer, int iSize, float fVal )
{
	const int iPrinted = snprintf ( sBuffer, iSize, "%f", fVal );
	const bool bRoundTrips = ( strtof ( sBuffer, nullptr )==fVal );
	return bRoundTrips ? iPrinted : snprintf ( sBuffer, iSize, "%1.8f", fVal );
}
// Render a double with the default 6 fractional digits; if parsing the text
// back does not reproduce the exact value, re-render with 8 fractional
// digits. Returns the printed length.
int PrintVarDouble ( char* sBuffer, int iSize, double fVal )
{
	const int iPrinted = snprintf ( sBuffer, iSize, "%f", fVal );
	const bool bRoundTrips = ( strtod ( sBuffer, nullptr )==fVal );
	return bRoundTrips ? iPrinted : snprintf ( sBuffer, iSize, "%1.8f", fVal );
}
// parse key/value pairs (tokenization done by sph::ParseKeyValues)
// into a hash of plain strings, keyed by identifier
SmallStringHash_T<CSphString> ParseKeyValueStrings ( const char * sBuf )
{
	SmallStringHash_T<CSphString> hRes;
	sph::ParseKeyValues ( sBuf, [&] ( CSphString && sIdent, CSphString && sValue )
	{
		hRes.Add ( sValue, sIdent );
	});
	return hRes;
}

// same as ParseKeyValueStrings, but values are wrapped into CSphVariant
SmallStringHash_T<CSphVariant> ParseKeyValueVars ( const char * sBuf )
{
	SmallStringHash_T<CSphVariant> hRes;
	sph::ParseKeyValues ( sBuf, [&] ( CSphString && sIdent, CSphString && sValue )
	{
		hRes.Add ( CSphVariant (sValue.cstr()), sIdent );
	});
	return hRes;
}
} // namespace sph
// crash-reporter flavor of vSprintf: the terminating zero is overwritten
// with EOL and counted in the returned length, so the buffer can be
// write()-n to a log fd as-is
static int sphVSprintf ( char * pOutput, const char * sFmt, va_list ap )
{
	auto iRes = sph::vSprintf (pOutput,sFmt,ap);
	pOutput[iRes++]='\n'; // replace the final zero with EOL
	return iRes;
}
// shared scratch buffer for sphSafeInfo(fd); static to avoid allocation in
// crash context. NOTE(review): not reentrant/thread-safe - confirm callers.
static char g_sSafeInfoBuf [ 1024 ];

// format a message with our own formatter and write() it straight to the
// given fd, appending EOL; intended for use from signal/crash handlers
void sphSafeInfo ( int iFD, const char * sFmt, ... )
{
	if ( iFD<0 || !sFmt )
		return;
	va_list ap;
	va_start ( ap, sFmt );
	int iLen = sphVSprintf ( g_sSafeInfoBuf, sFmt, ap ); // FIXME! make this vsnprintf
	va_end ( ap );
	sphWrite ( iFD, g_sSafeInfoBuf, size_t (iLen) );
}
// format into a caller-provided buffer (EOL-terminated, length returned);
// the buffer is assumed to be large enough - no bounds are checked
static int sphSafeInfo ( char * pBuf, const char * sFmt, ... )
{
	va_list ap;
	va_start ( ap, sFmt );
	int iLen = sphVSprintf ( pBuf, sFmt, ap ); // FIXME! make this vsnprintf
	va_end ( ap );
	return iLen;
}
// reference to the watchdog parent pid: 0 = no watchdog, -1 = user asked to
// always invoke gdb on crash anyway (see DumpGdb)
volatile int& getParentPID ()
{
	static int iWatchdogPID = 0;
	return iWatchdogPID;
}

// reference to the "safe to launch gdb" flag; sphBacktraceInit() sets it
// from jemalloc presence (forced true in debug builds)
volatile bool& getSafeGDB ()
{
	static bool bSafeToRunGDB = true;
	return bSafeToRunGDB;
}
#if !_WIN32
#define SPH_BACKTRACE_ADDR_COUNT 128	// max frames captured by backtrace()
#define SPH_BT_BINARY_NAME 2			// index of the binary path inside g_pArgv
#define SPH_BT_ADDRS 3					// index of the first address slot inside g_pArgv
// crash-time scratch state; static, so crash handling avoids allocation
static void * g_pBacktraceAddresses [SPH_BACKTRACE_ADDR_COUNT];
static char g_pBacktrace[4096];
static const char g_sSourceTail[] = "> source.txt\n";
// addr2line command line template: binary name and addresses filled in later
static const char * g_pArgv[128] = { "addr2line", "-e", "./searchd", "0x0", NULL };
static CSphString g_sBinaryName;
static auto& g_bSafeGDB = getSafeGDB ();

#ifdef HAVE_SYS_PRCTL_H
static char g_sNameBuf[512] = {0};	// resolved /proc/self/exe path
static char g_sPid[30] = { 0 };		// own pid as text, for gdb -p
#endif
// if we already run under debugger (i.e. from a kind of IDE) - avoid any scripted crash holding.
// Detection: read /proc/self/status and check whether the TracerPid: field
// holds a non-zero pid. Uses the static g_pBacktrace buffer as scratch.
bool IsDebuggerPresent()
{
	const int status_fd = ::open ( "/proc/self/status", O_RDONLY );
	if ( status_fd==-1 )
		return false;

	const ssize_t num_read = ::read ( status_fd, g_pBacktrace, sizeof ( g_pBacktrace ) - 1 );
	::close ( status_fd ); // fix: the descriptor leaked here on every call before

	if ( num_read<=0 )
		return false;

	g_pBacktrace[num_read] = '\0';
	constexpr char tracerPidString[] = "TracerPid:";
	const auto tracer_pid_ptr = ::strstr ( g_pBacktrace, tracerPidString );
	if ( !tracer_pid_ptr )
		return false;

	// skip whitespace after the tag; the first non-space char decides:
	// any digit other than '0' means a tracer (debugger) is attached
	for ( const char * characterPtr = tracer_pid_ptr + sizeof ( tracerPidString ) - 1;
		characterPtr<=g_pBacktrace + num_read; ++characterPtr )
	{
		if ( ::isspace ( *characterPtr ) )
			continue;
		else
			return ::isdigit ( *characterPtr )!=0 && *characterPtr!='0';
	}
	return false;
}
// Try to produce a gdb-assisted dump into iFD.
// Safest path: a watchdog parent exists - signal it and let it do the work.
// Otherwise resolve our own binary/pid and invoke sphDumpGdb directly, but
// only when it looks safe (jemalloc present) or explicitly requested.
static bool DumpGdb ( int iFD )
{
	auto & iParentPID = getParentPID ();
	if ( iParentPID>0 ) // most safe - ask watchdog to do everything
	{
		sphSafeInfo ( iFD, "Dump with GDB via watchdog");
		kill ( iParentPID, SIGUSR1 );
		sphSleepMsec (3*1000);
		return true;
	}

#ifdef HAVE_SYS_PRCTL_H
	int iPos = sphSafeInfo ( g_sPid, "%d", getpid () );
	g_sPid[iPos-1] = '\0'; // make null-terminated from EOL string
	// readlink() returns -1 on error and does NOT null-terminate;
	// previously the unchecked -1 was used as an index, writing g_sNameBuf[-1]
	auto iNameLen = ::readlink ( "/proc/self/exe", g_sNameBuf, sizeof ( g_sNameBuf ) - 1 );
	if ( iNameLen<0 )
		iNameLen = 0;
	g_sNameBuf [ iNameLen ] = '\0';
	if ( g_bSafeGDB || iParentPID==-1 ) // jemalloc looks safe, or user explicitly asked to invoke gdb anyway
		return sphDumpGdb ( iFD, g_sNameBuf, g_sPid );
#endif
	sphSafeInfo ( iFD, "Dump with GDB is not available" );
	return false;
}
// Attach gdb to ourselves (binary sName, pid sPid) and dump thread list,
// backtraces and locals into iFD. Forks an intermediate supervisor which in
// turn forks a gdb worker plus two watchdog timers: after 30s gdb's child
// processes are killed, after 60s gdb itself is. Returns false when gdb
// could not be used (debugger already attached, fork failed, no prctl).
bool sphDumpGdb (int iFD, const char* sName, const char* sPid )
{
#ifdef HAVE_SYS_PRCTL_H
	if ( IsDebuggerPresent ())
		return false;
#ifdef PR_SET_PTRACER
	// allow to trace us
	prctl ( PR_SET_PTRACER, PR_SET_PTRACER_ANY, 0, 0, 0 );
#endif

	sigset_t signal_set;
	sigemptyset ( &signal_set );
	sigaddset ( &signal_set, SIGTERM );
	sigaddset ( &signal_set, SIGINT );
	sigaddset ( &signal_set, SIGSEGV );
	sigaddset ( &signal_set, SIGABRT );
	sigaddset ( &signal_set, SIGILL );
	sigaddset ( &signal_set, SIGUSR1 );
	sigaddset ( &signal_set, SIGHUP );
	// Block signals to child processes
	sigprocmask ( SIG_BLOCK, &signal_set, NULL );

	// Spawn helper process which will keep running when gdb is attached to us
	pid_t iPidIntermediate = fork ();
	if ( iPidIntermediate==-1 )
		return false;
	if ( !iPidIntermediate ) // the helper, run gdb
	{
		// Watchdog 1: Used to kill sub processes to gdb which may hang
		pid_t iWait30 = fork ();
		if ( iWait30==-1 )
			_Exit ( 1 );
		if ( !iWait30 )
		{
			sphSleepMsec ( 30000 );
			_Exit ( 1 );
		}
		// Watchdog 2: Give up on gdb, if it still does not finish even after killing its sub processes
		pid_t iWait60 = fork ();
		if ( iWait60==-1 )
		{
			kill ( iWait30, SIGKILL );
			_Exit ( 1 );
		}
		if ( !iWait60 )
		{
			sphSleepMsec ( 60000 );
			_Exit ( 1 );
		}
		// Worker: Spawns gdb
		pid_t iWorker = fork ();
		if ( !iWorker )
		{
			sphSafeInfo ( iFD, "Will run gdb on '%s', pid '%s'", sName, sPid );
			// route gdb's stdout/stderr into the log fd
			if ( dup2 ( iFD, STDOUT_FILENO )==-1 )
				_Exit ( 1 );
			if ( dup2 ( iFD, STDERR_FILENO )==-1 )
				_Exit ( 1 );
			execlp ( "gdb", "gdb", "-batch", "-n",
				"-ex", "info threads",
				"-ex", "thread apply all bt",
				"-ex", "echo \nMain thread:\n",
				"-ex", "bt",
				"-ex", "echo \nLocal variables:\n",
				"-ex", "info locals",
				"-ex", "detach",
				"-se", sName, "-p", sPid, nullptr );
			// If gdb failed to start, signal back
			_Exit ( 1 );
		}

		// supervisor loop: wait for the three children, cross-killing the
		// others depending on which one finishes first
		int iResult = 1;
		while ( iWorker || iWait30 || iWait60 )
		{
			int iStatus;
			pid_t iExited = wait ( &iStatus );
			if ( iExited == iWorker )
			{
				if ( WIFEXITED( iStatus ) && WEXITSTATUS( iStatus )==0 )
					iResult = 0; // Success
				else
					iResult = 2; // Failed to start gdb
				iWorker = 0;
				if ( iWait60 )
					kill ( iWait60, SIGKILL );
				if ( iWait30 )
					kill ( iWait30, SIGKILL );
			} else if ( iExited == iWait30 )
			{
				// 30s passed: kill gdb's child processes, keep gdb running
				iWait30 = 0;
				if ( iWorker )
				{
					sphSafeInfo ( g_sNameBuf, "pkill -KILL -P %d", iWorker );
					auto iFoo = ::system ( g_sNameBuf );
					(void) iFoo; // that is unused
				}
			} else if ( iExited == iWait60 )
			{
				// 60s passed: give up on gdb entirely
				iWait60 = 0;
				if ( iWorker )
					kill ( iWorker, SIGKILL );
				if ( iWait30 )
					kill ( iWait30, SIGKILL );
			}
		}
		_Exit ( iResult );
	}

	// main process
	assert ( iPidIntermediate>0 );
	int iStatus;
	sigprocmask ( SIG_UNBLOCK, &signal_set, nullptr );
	pid_t iRes = waitpid ( iPidIntermediate, &iStatus, 0 );
	if ( iRes==-1 || iRes==0 )
		return false;

	// master branch is mirrored on github, so could generate more info here.
	if ( strncmp ( szGIT_BRANCH_ID, "git branch master", 17 ) == 0 ) {
		sphSafeInfo ( iFD, "You can obtain the sources of this version from https://github.com/manticoresoftware/manticoresearch/archive/%s.zip\n"
			"and set up debug env with this shippet (select wget or curl version below):\n\n"
			" wget https://codeload.github.com/manticoresoftware/manticoresearch/zip/%s -O manticore.zip\n"
			" curl https://codeload.github.com/manticoresoftware/manticoresearch/zip/%s -o manticore.zip",
			szGIT_COMMIT_ID, szGIT_COMMIT_ID, szGIT_COMMIT_ID );
		sphSafeInfo ( iFD,
			"\nUnpack the sources by command:\n"
			" mkdir -p /tmp/manticore && unzip manticore.zip -d /tmp/manticore\n\n"
			"For comfortable debug also suggest to append a substitution def to your ~/.gdbinit file:\n"
			" set substitute-path \"%s\" /tmp/manticore/manticoresearch-%s", szGDB_SOURCE_DIR, szGIT_COMMIT_ID );
	}
	return true;
#else
	return false;
#endif
}
#if HAVE_BACKTRACE & HAVE_BACKTRACE_SYMBOLS
// Capture and symbolize the current call stack into the static g_pBacktrace
// buffer. iDepth limits captured frames (0 = max), iSkip drops the
// innermost ones. Returns a newline-separated list in static storage.
// NOTE(review): no bounds check against sizeof(g_pBacktrace) while copying
// symbol strings - very long symbol lists could overflow; confirm limits.
const char * DoBacktrace ( int iDepth, int iSkip )
{
	if ( !iDepth || iDepth > SPH_BACKTRACE_ADDR_COUNT )
		iDepth = SPH_BACKTRACE_ADDR_COUNT;
	iDepth = backtrace ( g_pBacktraceAddresses, iDepth );
	char ** ppStrings = backtrace_symbols ( g_pBacktraceAddresses, iDepth );
	if ( !ppStrings )
		return NULL;
	// flatten the symbol strings into g_pBacktrace, one frame per line
	char * pDst = g_pBacktrace;
	for ( int i=iSkip; i<iDepth; ++i )
	{
		const char * pStr = ppStrings[i];
		do
			*pDst++ = *pStr++;
		while (*pStr);
		*pDst++='\n';
	}
	*pDst = '\0';
	free ( ppStrings );
	return g_pBacktrace;
}
#else
const char * DoBacktrace ( int, int )
{
	return nullptr; ///< sorry, no backtraces...
}
#endif
#if !_WIN32
// detect the legacy LinuxThreads pthread implementation; affects how many
// frames the signal trampoline occupies (see iReturnFrameCount in sphBacktrace)
inline bool IsLtLib()
{
#ifndef _CS_GNU_LIBPTHREAD_VERSION
	return false;
#else
	char buff[64];
	confstr ( _CS_GNU_LIBPTHREAD_VERSION, buff, 64 );
	// NOTE(review): confstr() result is unchecked; on failure buff may stay
	// uninitialized before the comparison - confirm this cannot happen here
	if ( !strncasecmp ( buff, "linuxthreads", 12 ) )
		return true;
	return false;
#endif
}
#endif
#define BOOST_STACKTRACE_GNU_SOURCE_NOT_REQUIRED 1
#if !_WIN32
// #define BOOST_STACKTRACE_USE_ADDR2LINE 1
#endif
#include <boost/stacktrace.hpp>
// Crash-time backtrace reporter: writes build info, a manual frame-pointer
// walk, the libc backtrace, a boost::stacktrace dump and bug-report hints
// into iFD, then tries a gdb dump and finally converts raw addresses to
// source lines via addr2line. bSafe skips the stack walk (no TLS access).
void sphBacktrace ( int iFD, bool bSafe )
{
	if ( iFD<0 )
		return;

	sphSafeInfo ( iFD, "-------------- backtrace begins here ---------------" );
#ifdef COMPILER
	sphSafeInfo ( iFD, "Program compiled with " COMPILER );
#endif
#ifdef CONFIGURE_FLAGS
	sphSafeInfo ( iFD, "Configured with flags: " CONFIGURE_FLAGS );
#endif
#ifdef OS_UNAME
	sphSafeInfo ( iFD, "Built on " OS_UNAME );
#endif

	bool bOk = true;
	const void * pMyStack = nullptr;
	int iStackSize = 0;
	if ( !bSafe )
	{
		pMyStack = Threads::MyStack();
		iStackSize = Threads::MyStackSize();
	}
	sphSafeInfo ( iFD, "Stack bottom = 0x%p, thread stack size = 0x%x", pMyStack, iStackSize );

	// manual backtrace: walk the saved frame-pointer chain; the while(..)
	// is just a breakable scope - every path ends with 'break'
	while ( pMyStack && !bSafe )
	{
		sphSafeInfo ( iFD, "Trying manual backtrace:" );
		BYTE ** pFramePointer = NULL;

		int iFrameCount = 0;
		// LinuxThreads signal trampoline occupies one extra frame
		int iReturnFrameCount = IsLtLib() ? 2 : 1;

		// grab the current frame pointer from ebp/rbp
#ifdef __i386__
#define SIGRETURN_FRAME_OFFSET 17
		__asm __volatile__ ( "movl %%ebp,%0":"=r"(pFramePointer):"r"(pFramePointer) );
#endif

#ifdef __x86_64__
#define SIGRETURN_FRAME_OFFSET 23
		__asm __volatile__ ( "movq %%rbp,%0":"=r"(pFramePointer):"r"(pFramePointer) );
#endif

#ifndef SIGRETURN_FRAME_OFFSET
#define SIGRETURN_FRAME_OFFSET 0
#endif

		if ( !pFramePointer )
		{
			sphSafeInfo ( iFD, "Frame pointer is null, manual backtrace failed (did you build with -fomit-frame-pointer?)" );
			break;
		}

		if ( !pMyStack || (BYTE*) pMyStack > (BYTE*) &pFramePointer )
		{
			// stack bottom looks wrong; approximate it from a local address
			int iRound = Min ( 65536, iStackSize );
			pMyStack = (void *) ( ( (size_t) &pFramePointer + iRound ) & ~(size_t)65535 );
			sphSafeInfo ( iFD, "Something wrong with thread stack, manual backtrace may be incorrect (fp=0x%p)", pFramePointer );

			if ( pFramePointer > (BYTE**) pMyStack || pFramePointer < (BYTE**) pMyStack - iStackSize )
			{
				sphSafeInfo ( iFD, "Wrong stack limit or frame pointer, manual backtrace failed (fp=0x%p, stack=0x%p, stacksize=0x%x)",
					pFramePointer, pMyStack, iStackSize );
				break;
			}
		}

		sphSafeInfo ( iFD, "Stack looks OK, attempting backtrace." );

		// follow saved frame pointers towards the stack bottom, printing the
		// return address of each frame (the sigreturn frame needs an offset)
		BYTE** pNewFP = NULL;
		while ( pFramePointer < (BYTE**) pMyStack )
		{
			pNewFP = (BYTE**) *pFramePointer;
			sphSafeInfo ( iFD, "0x%p", iFrameCount==iReturnFrameCount ? *(pFramePointer + SIGRETURN_FRAME_OFFSET) : *(pFramePointer + 1) );

			bOk = pNewFP > pFramePointer;
			if ( !bOk ) break;

			pFramePointer = pNewFP;
			iFrameCount++;
		}

		if ( !bOk )
			sphSafeInfo ( iFD, "Something wrong in frame pointers, manual backtrace failed (fp=%p)", pNewFP );

		break;
	}

	int iDepth = 0;
#if HAVE_BACKTRACE
	sphSafeInfo ( iFD, "Trying system backtrace:" );
	iDepth = backtrace ( g_pBacktraceAddresses, SPH_BACKTRACE_ADDR_COUNT );
	if ( iDepth>0 )
		bOk = true;
#if HAVE_BACKTRACE_SYMBOLS
	sphSafeInfo ( iFD, "begin of system symbols:" );
	backtrace_symbols_fd ( g_pBacktraceAddresses, iDepth, iFD );
#elif !HAVE_BACKTRACE_SYMBOLS
	sphSafeInfo ( iFD, "begin of manual symbols:" );
	for ( int i=0; i<iDepth; i++ )
		sphSafeInfo ( iFD, "%p", g_pBacktraceAddresses[i] );
#endif // HAVE_BACKTRACE_SYMBOLS
#endif // !HAVE_BACKTRACE

	sphSafeInfo ( iFD, "Trying boost backtrace:" );
	sphSafeInfo ( iFD, to_string ( boost::stacktrace::stacktrace() ).c_str() );

	sphSafeInfo ( iFD, "-------------- backtrace ends here ---------------" );

	if ( bOk )
		sphSafeInfo ( iFD, "Please, create a bug report in our bug tracker (https://github.com/manticoresoftware/manticore/issues)\n"
			"and attach there:\n"
			"a) searchd log, b) searchd binary, c) searchd symbols.\n"
			"Look into the chapter 'Reporting bugs' in the manual\n"
			"(https://manual.manticoresearch.com/Reporting_bugs)" );

	if ( DumpGdb ( iFD ) )
		return;

	// convert all BT addresses to source code lines
	int iCount = Min ( iDepth, (int)( sizeof(g_pArgv)/sizeof(g_pArgv[0]) - SPH_BT_ADDRS - 1 ) );
	sphSafeInfo ( iFD, "--- BT to source lines (depth %d): ---", iCount );
	char * pCur = g_pBacktrace;
	for ( int i=0; i<iCount; i++ )
	{
		// early out on strings buffer overrun
		if ( pCur>=g_pBacktrace+sizeof(g_pBacktrace)-48 )
		{
			iCount = i;
			break;
		}
		g_pArgv[i+SPH_BT_ADDRS] = pCur;
		pCur += sphSafeInfo ( pCur, "0x%x", g_pBacktraceAddresses[i] );
		*(pCur-1) = '\0'; // make null terminated string from EOL string
	}
	g_pArgv[iCount+SPH_BT_ADDRS] = NULL;

	// run addr2line in a child with stdout redirected into the log fd
	int iChild = fork();

	if ( iChild==0 )
	{
		// map stdout to log file
		if ( iFD!=1 )
		{
			close ( 1 );
			dup2 ( iFD, 1 );
		}
		execvp ( g_pArgv[0], const_cast<char **> ( g_pArgv ) ); // using execvp instead execv to auto find addr2line in directories

		// if we here - execvp failed, ask user to do conversion manually
		sphSafeInfo ( iFD, "conversion failed (error '%s'):\n"
			"  1. Run the command provided below over the crashed binary (for example, '%s'):\n"
			"  2. Attach the source.txt to the bug report.", strerrorm ( errno ), g_pArgv[SPH_BT_BINARY_NAME] );

		// echo the intended addr2line command line, wrapped at ~80 columns
		int iColumn = 0;
		for ( int i=0; g_pArgv[i]!=NULL; i++ )
		{
			const char * s = g_pArgv[i];
			while ( *s )
				s++;
			size_t iLen = s-g_pArgv[i];
			sphWrite ( iFD, g_pArgv[i], iLen );
			sphWrite ( iFD, " ", 1 );
			int iWas = iColumn % 80;
			iColumn += iLen;
			int iNow = iColumn % 80;
			if ( iNow<iWas )
				sphWrite ( iFD, "\n", 1 );
		}
		sphWrite ( iFD, g_sSourceTail, sizeof(g_sSourceTail)-1 );
		exit ( 1 );
	} else
	if ( iChild==-1 )
	{
		sphSafeInfo ( iFD, "fork for running execvp failed: [%d] %s", errno, strerrorm(errno) );
		return;
	}

	int iStatus, iResult;
	do
	{
		// can be interrupted by pretty much anything (e.g. SIGCHLD from other searchd children)
		iResult = waitpid ( iChild, &iStatus, 0 );

		// they say this can happen if child exited and SIGCHLD was ignored
		// a cleaner one would be to temporary handle it here, but can we be bothered
		if ( iResult==-1 && errno==ECHILD )
		{
			iResult = iChild;
			iStatus = 0;
		}

		if ( iResult==-1 && errno!=EINTR )
		{
			sphSafeInfo ( iFD, "waitpid() failed: [%d] %s", errno, strerrorm(errno) );
			return;
		}
	} while ( iResult!=iChild );

	sphSafeInfo ( iFD, "--- BT to source lines finished ---" );
}
// remember the binary path so the addr2line command line in sphBacktrace
// points at the real executable instead of the "./searchd" placeholder
void sphBacktraceSetBinaryName ( const char * sName )
{
	g_sBinaryName = sName;
	g_pArgv[SPH_BT_BINARY_NAME] = g_sBinaryName.cstr();
}

// one-time init: warm up backtrace() (forces libgcc load before any crash)
// and decide whether invoking gdb later is considered safe
void sphBacktraceInit()
{
#if HAVE_BACKTRACE
	backtrace ( g_pBacktraceAddresses, SPH_BACKTRACE_ADDR_COUNT );
#endif // !HAVE_BACKTRACE

	// check that jemalloc is present
	bool bSafeGdb = true;
#if HAVE_DLOPEN
	void * fnJMalloc = dlsym ( RTLD_DEFAULT, "mallctl" );
	bSafeGdb = ( fnJMalloc!=nullptr );
#endif
#ifndef NDEBUG
	bSafeGdb = true;	// debug builds always allow gdb
#endif
	g_bSafeGDB = bSafeGdb;
}
#else // _WIN32
const char * DoBacktrace ( int, int )
{
	return NULL; ///< sorry, no backtraces on Windows...
}

// Windows flavor: instead of a textual backtrace, write a minidump of the
// crashed process into sFile using the dbghelp API
void sphBacktrace ( EXCEPTION_POINTERS * pExc, const char * sFile )
{
	if ( !pExc || !sFile || !(*sFile) )
	{
		sphInfo ( "can't generate minidump" );
		return;
	}

	HANDLE hFile = CreateFile ( sFile, GENERIC_WRITE, 0, 0, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, 0 );
	if ( hFile==INVALID_HANDLE_VALUE )
	{
		sphInfo ( "can't create minidump file '%s'", sFile );
		return;
	}

	MINIDUMP_EXCEPTION_INFORMATION tExcInfo;
	tExcInfo.ExceptionPointers = pExc;
	tExcInfo.ClientPointers = FALSE;
	tExcInfo.ThreadId = GetCurrentThreadId();

	bool bDumped = ( MiniDumpWriteDump ( GetCurrentProcess(), GetCurrentProcessId(), hFile, MiniDumpNormal, &tExcInfo, 0, 0 )==TRUE );
	CloseHandle ( hFile );

	if ( !bDumped )
		sphInfo ( "can't dump minidump" );
}

// no-op on Windows: the addr2line machinery is POSIX-only
void sphBacktraceSetBinaryName ( const char * )
{
}

// no-op on Windows
void sphBacktraceInit()
{
}
#endif // _WIN32
// if we configured with jemalloc headers, and determine that it's stats function is available - use it with param.
// else check, if malloc_stats is available, and use it.
// Otherwise just do nothing.
void sphMallocStats ( const char* szParams )
{
#ifdef HAVE_JEMALLOC_JEMALLOC_H
	// resolve jemalloc's malloc_stats_print once, lazily, via dlsym
	static bool bInitialized = false;
	using MallocStats_FN = decltype ( &malloc_stats_print );
	static MallocStats_FN fnJMallocStats = nullptr;
	if (!bInitialized)
	{
#if HAVE_DLOPEN
		fnJMallocStats = (MallocStats_FN) dlsym ( RTLD_DEFAULT, "malloc_stats_print" );
#endif
		bInitialized = true;
	}

	if (fnJMallocStats)
		return ( *fnJMallocStats ) ( nullptr, nullptr, szParams );
#endif
#if HAVE_MALLOC_STATS
	// fallback: glibc malloc_stats (ignores szParams)
	return malloc_stats();
#endif
}
// process-wide flag: whether obsolete table files get removed from disk
// (consulted by sphUnlinkIndex); defaults to true
static bool g_bUnlinkOld = true;

void sphSetUnlinkOld ( bool bUnlink )
{
	// remember whether obsolete table files should be deleted
	g_bUnlinkOld = bUnlink;
}

bool sphGetUnlinkOld ()
{
	// report the current 'delete obsolete files' setting
	return g_bUnlinkOld;
}
// remove the files of the given table from disk; honors the global
// 'unlink old' switch unless bForce is set
void sphUnlinkIndex ( const char * sName, bool bForce )
{
	if ( !( g_bUnlinkOld || bForce ) )
		return;

	IndexFiles_c ( sName ).Unlink ();
}
// startup sanity check: die if two tables in the config share the same
// 'path' value, which is not allowed
void sphCheckDuplicatePaths ( const CSphConfig & hConf )
{
	if ( !hConf.Exists ( "index" ) )
		return;
	// map path -> first table name seen with that path
	CSphOrderedHash < CSphString, CSphString, CSphStrHashFunc, 256 > hPaths;
	for ( const auto& tVal : hConf["index"] )
	{
		const CSphConfigSection & hIndex = tVal.second;
		if ( hIndex ( "path" ) )
		{
			const CSphString & sIndex = tVal.first;
			if ( hPaths ( hIndex["path"].strval() ) )
				sphDie ( "duplicate paths: table '%s' has the same path as '%s'.\n", sIndex.cstr(), hPaths[hIndex["path"].strval()].cstr() );
			hPaths.Add ( sIndex, hIndex["path"].strval() );
		}
	}
}
// Apply the 'common' config section: lemmatizer base path, JSON attribute
// parsing options and the plugin dir. The optional fnPathFix callback makes
// relative paths absolute before they are used.
void sphConfigureCommon ( const CSphConfig & hConf, FixPathAbsolute_fn && fnPathFix )
{
	if ( !hConf("common") || !hConf["common"]("common") )
	{
		sphPluginInit ( nullptr );
		return;
	}

	CSphConfigSection & hCommon = hConf["common"]["common"];
	if ( hCommon ( "lemmatizer_base" ) )
	{
		// read the configured value first, then fix the path; the fixup used
		// to run on the stale previous value and was immediately overwritten
		// (compare with the plugin_dir handling below)
		g_sLemmatizerBase = hCommon.GetStr ( "lemmatizer_base" );
		if ( fnPathFix )
			fnPathFix ( g_sLemmatizerBase );
	}

	bool bJsonStrict = false;
	bool bJsonAutoconvNumbers;
	bool bJsonKeynamesToLowercase = false;

	if ( hCommon("on_json_attr_error") )
	{
		const CSphString & sVal = hCommon["on_json_attr_error"].strval();
		if ( sVal=="ignore_attr" )
			bJsonStrict = false;
		else if ( sVal=="fail_index" )
			bJsonStrict = true;
		else
			sphDie ( "unknown on_json_attr_error value (must be one of ignore_attr, fail_index)" );
	}

	if ( hCommon("json_autoconv_keynames") )
	{
		const CSphString & sVal = hCommon["json_autoconv_keynames"].strval();
		if ( sVal=="lowercase" )
			bJsonKeynamesToLowercase = true;
		else
			sphDie ( "unknown json_autoconv_keynames value (must be 'lowercase')" );
	}

	bJsonAutoconvNumbers = ( hCommon.GetInt ( "json_autoconv_numbers", 0 )!=0 );
	sphSetJsonOptions ( bJsonStrict, bJsonAutoconvNumbers, bJsonKeynamesToLowercase );

	if ( hCommon( "plugin_dir" ) )
	{
		CSphString sPluginDir ( hCommon["plugin_dir"].cstr() );
		if ( fnPathFix )
			fnPathFix ( sPluginDir );
		sphPluginInit ( sPluginDir.cstr() );
	}
}
// scan a UTF-8 buffer and return true as soon as one codepoint classified
// as Chinese (by sphIsChineseCode) is decoded
bool sphDetectChinese ( const BYTE * szBuffer, int iLength )
{
	if ( !szBuffer || !iLength )
		return false;

	const BYTE * pBuffer = szBuffer;
	while ( pBuffer<szBuffer+iLength )
	{
		// sphUTF8Decode advances pBuffer past the decoded codepoint
		int iCode = sphUTF8Decode ( pBuffer );
		if ( sphIsChineseCode ( iCode ) )
			return true;
	}

	return false;
}
#if HAVE_DLOPEN
// open the shared library; bGlobal makes its symbols visible to libraries
// loaded afterwards (RTLD_GLOBAL). Failure is logged, not fatal.
void CSphDynamicLibrary::CSphDynamicLibraryAlternative ( const char* szPath, bool bGlobal )
{
	if ( m_bReady || m_pLibrary )
		return;
	m_pLibrary = dlopen ( szPath, RTLD_NOW | ( bGlobal ? RTLD_GLOBAL : RTLD_LOCAL ) );
	if ( !m_pLibrary )
		sphLogDebug ( "dlopen(%s) failed", szPath );
	else
		sphLogDebug ( "dlopen(%s)=%p", szPath, m_pLibrary );
}

CSphDynamicLibrary::CSphDynamicLibrary ( const char * sPath, bool bGlobal )
	: m_bReady ( false )
	, m_pLibrary ( nullptr )
{
	CSphDynamicLibraryAlternative ( sPath, bGlobal );
}

CSphDynamicLibrary::~CSphDynamicLibrary()
{
	if ( m_pLibrary )
		dlclose ( m_pLibrary );
};

// resolve iNum symbols by name and store them through the provided slots;
// all-or-nothing: returns false (and stays not ready) if any is missing
bool CSphDynamicLibrary::LoadSymbols ( const char** sNames, void*** pppFuncs, int iNum )
{
	if ( !m_pLibrary )
		return false;
	if ( m_bReady )
		return true;
	for ( int i=0; i<iNum; ++i )
	{
		void* pResult = dlsym ( m_pLibrary, sNames[i] );
		if ( !pResult )
		{
			sphLogDebug ( "Symbol %s not found", sNames[i] );
			return false;
		}
		// yes, it is void*** - triple pointer.
		// void* is the legacy pointer (to the function, in this case).
		// void** is the variable where we store the pointer to the function.
		// that is to cast all different pointers to legacy void*.
		// we put the addresses to these variables into array, and it adds
		// one more level of indirection. void*** actually is void**[]
		*pppFuncs[i] = pResult;
	}
	m_bReady = true;
	return true;
};
#else
// stubs for platforms without dlopen support
void CSphDynamicLibrary::CSphDynamicLibraryAlternative ( const char *, bool ) {};
CSphDynamicLibrary::CSphDynamicLibrary ( const char *, bool ) {};
bool CSphDynamicLibrary::LoadSymbols ( const char **, void ***, int ) { return false; }
CSphDynamicLibrary::~CSphDynamicLibrary() = default;
#endif
// calculate new weights as inverse freqs of timers, giving also small probability to bad timers.
// dTimers[i] is the measured response time of mirror i (<=0 = no response);
// dWeights[i] receives its share in percents, summing to ~100 over all
// responsive mirrors. If nobody responded, previous weights are kept.
void RebalanceWeights ( const CSphFixedVector<int64_t> & dTimers, CSphFixedVector<float>& dWeights )
{
	assert ( dTimers.GetLength () );
	float fSum = 0.0;
	int iAlive = 0;

	// weights are proportional to frequencies (inverse to timers)
	CSphFixedVector<float> dFrequencies { dTimers.GetLength () };
	ARRAY_FOREACH ( i, dTimers )
		if ( dTimers[i]>0 )
		{
			dFrequencies[i] = ( 1.0f / dTimers[i] );
			fSum += dFrequencies[i];
			++iAlive;
		}

	// no statistics, all timers bad, keep previous weights
	if ( !iAlive )
		return;

	// if one or more bad (empty) timers provided, give fEmptiesPercent frac to all of them,
	// and also assume fEmptiesPercent/num_of_deads fraq per each of them.
	// NOTE(review): fEmptyPercent is never changed from 0.0f, so dead mirrors
	// currently end up with exactly zero weight - confirm that is intended
	float fEmptyPercent = 0.0f;

	// balance weights
	Debug ( float fCheck = 0; )
	ARRAY_FOREACH ( i, dFrequencies )
	{
		// mirror weight is inverse of timer \ query time
		float fWeight = 100.0f * dFrequencies[i] / fSum;

		// mirror without response
		if ( dTimers[i]<=0 )
			fWeight = fEmptyPercent;

		assert ( fWeight>=0.0 && fWeight<=100.0 );
		dWeights[i] = fWeight;
		Debug ( fCheck += fWeight; )
	}
	assert ( fCheck<=100.000001 && fCheck>=99.99999);
}
/// collect warnings/errors from any suitable context.
/// Internally keeps two StringBuilder_c accumulators sharing the same
/// delimiter/prefix/terminator decoration.
Warner_c::Warner_c ( const char * sDel, const char * sPref, const char * sTerm )
	: m_sWarnings ( sDel, sPref, sTerm )
	, m_sErrors ( sDel, sPref, sTerm )
	, m_sDel ( sDel )
	, m_sPref ( sPref )
	, m_sTerm ( sTerm )
{}

// move-construct: steal the accumulated strings; the decoration pointers
// are copied as-is. NOTE(review): m_sDel/m_sPref/m_sTerm are raw pointers,
// presumably referencing static literals - confirm their lifetime.
Warner_c::Warner_c ( Warner_c &&rhs ) noexcept
{
	m_sWarnings = std::move (rhs.m_sWarnings);
	m_sErrors = std::move (rhs.m_sErrors);
	m_sDel = rhs.m_sDel;
	m_sPref = rhs.m_sPref;
	m_sTerm = rhs.m_sTerm;
}

// move-assign with self-assignment guard
Warner_c& Warner_c::operator= ( Warner_c && rhs ) noexcept
{
	if ( &rhs!=this )
	{
		m_sWarnings = std::move ( rhs.m_sWarnings );
		m_sErrors = std::move ( rhs.m_sErrors );
		m_sDel = rhs.m_sDel;
		m_sPref = rhs.m_sPref;
		m_sTerm = rhs.m_sTerm;
	}
	return *this;
}
// append a printf-formatted error message
void Warner_c::Err ( const char * sFmt, ... )
{
	va_list ap;
	va_start ( ap, sFmt );
	m_sErrors.vSprintf ( sFmt, ap );
	va_end ( ap );
}

// append a ready error string
void Warner_c::Err ( const CSphString &sMsg )
{
	m_sErrors << sMsg;
}

// append a printf-formatted warning message
void Warner_c::Warn ( const char * sFmt, ... )
{
	va_list ap;
	va_start ( ap, sFmt );
	m_sWarnings.vSprintf ( sFmt, ap );
	va_end ( ap );
}

// append a ready warning string
void Warner_c::Warn ( const CSphString &sMsg )
{
	m_sWarnings << sMsg;
}

// drop everything collected so far and re-apply the configured decoration
void Warner_c::Clear ()
{
	m_sErrors.Clear ();
	m_sWarnings.Clear();
	if ( m_sDel || m_sPref || m_sTerm )
	{
		m_sErrors.StartBlock ( m_sDel, m_sPref, m_sTerm );
		m_sWarnings.StartBlock ( m_sDel, m_sPref, m_sTerm );
	}
}

// peek at the accumulated errors (no transfer of ownership)
const char * Warner_c::sError () const
{
	return m_sErrors.cstr();
}

// peek at the accumulated warnings (no transfer of ownership)
const char * Warner_c::sWarning () const
{
	return m_sWarnings.cstr();
}
void Warner_c::AddStringsFrom ( const Warner_c &sSrc )
{
if ( !sSrc.WarnEmpty () )
m_sWarnings << sSrc.sWarning ();
if ( !sSrc.ErrEmpty () )
m_sWarnings << sSrc.sError ();
}
// finalize decoration and hand the error text over to sTarget (emptying self)
void Warner_c::MoveErrorsTo ( CSphString &sTarget )
{
	m_sErrors.FinishBlocks();
	m_sErrors.MoveTo ( sTarget );
}

// finalize decoration and hand the warning text over to sTarget (emptying self)
void Warner_c::MoveWarningsTo ( CSphString &sTarget )
{
	m_sWarnings.FinishBlocks();
	m_sWarnings.MoveTo ( sTarget );
}

// combine both streams into one "ERRORS: ...; WARNINGS: ..." string,
// move it to sTarget and reset the collector
void Warner_c::MoveAllTo ( CSphString &sTarget )
{
	m_sErrors.FinishBlocks();
	m_sWarnings.FinishBlocks();

	StringBuilder_c sCollection ( "; ", m_sPref, m_sTerm );
	sCollection.StartBlock ( nullptr, "ERRORS: ");
	sCollection << m_sErrors.cstr();
	sCollection.FinishBlock();
	sCollection.StartBlock ( nullptr, "WARNINGS: " );
	sCollection << m_sWarnings.cstr();
	sCollection.FinishBlocks();
	sCollection.MoveTo ( sTarget );
	Clear();
}
// per-thread error message accumulator; the Err(...) family returns false
// so callers can write 'return TlsMsg::Err(...)' from bool-returning code
namespace TlsMsg
{
// static thread_local StringBuilder_c sTlsMsgs;
// the accumulator lives in the thread descriptor, not in plain TLS
inline StringBuilder_c& TlsMsgs() noexcept
{
	return *Threads::MyThd().m_pTlsMsg.load ( std::memory_order_relaxed );
}

// set (replace, not append) the thread error from a printf-style format
bool Err( const char* sFmt, ... )
{
	StringBuilder_c sMsgs;
	va_list ap;
	va_start ( ap, sFmt );
	sMsgs.vSprintf( sFmt, ap );
	va_end ( ap );
	TlsMsgs().Swap ( sMsgs );
	return false;
}

// append a ready message; empty input is a no-op returning true
bool Err( const CSphString& sMsg )
{
	if (sMsg.IsEmpty())
		return true;
	TlsMsgs() << sMsg;
	return false;
}

void ResetErr() { TlsMsgs().Clear(); }
StringBuilder_c& Err() { return TlsMsgs(); }
const char* szError() { return TlsMsgs().cstr(); }

// transfer the accumulated message into sError (leaves it untouched if empty)
void MoveError ( CSphString& sError )
{
	if ( TlsMsgs().IsEmpty())
		return;
	TlsMsgs().MoveTo(sError);
}

// transfer the accumulated message out as a fresh string
CSphString MoveToString ()
{
	CSphString sError;
	TlsMsgs().MoveTo ( sError );
	return sError;
}

bool HasErr() { return !TlsMsgs().IsEmpty(); }
}
// Return a pointer into sFullPath at the file-name part (the text after the
// last '/' or '\\'), or nullptr for an empty path. No allocation happens;
// the pointer is only valid while sFullPath lives. Note that scanning
// starts one char before the end, so a single trailing separator itself is
// never treated as the boundary.
const char * GetBaseName ( const CSphString & sFullPath )
{
	if ( sFullPath.IsEmpty() )
		return nullptr;

	const char * szStart = sFullPath.cstr();
	const char * szTail = szStart + sFullPath.Length() - 1;
	while ( szTail>szStart && szTail[-1]!='/' && szTail[-1]!='\\' )
		--szTail;
	return szTail;
}
// Simple unique-id generator: a per-instance base (seeded at startup)
// plus an atomically incremented counter starting from 1.
struct UUID_t
{
	std::atomic<int64_t> m_iUID { 1 };	// monotonically growing counter
	int64_t m_iUidBase = 0;				// seed set by UidShortSetup()

	// produce the next id; thread-safe via the relaxed atomic increment
	int64_t Get ()
	{
		const int64_t iCounter = m_iUID.fetch_add ( 1, std::memory_order_relaxed );
		return m_iUidBase + iCounter;
	}
};
// process-wide id generators, both seeded by UidShortSetup()
static UUID_t g_tUidShort;
static UUID_t g_tIndexUid;

// next short unique id
int64_t UidShort()
{
	return g_tUidShort.Get();
}

// next unique table/index id
int64_t GetIndexUid()
{
	return g_tIndexUid.Get();
}

// build the uid seed: top bits carry the server id (7 bits), middle bits
// the start timestamp, so ids from different servers/restarts don't collide
void UidShortSetup ( int iServer, int iStarted )
{
	int64_t iSeed = ( (int64_t)iServer & 0x7f ) << 56;
	iSeed += ((int64_t)iStarted ) << 24;
	g_tUidShort.m_iUidBase = iSeed;
	g_tIndexUid.m_iUidBase = iSeed;
	sphLogDebug ( "uid-short server_id %d, started %d, seed " INT64_FMT, iServer, iStarted, iSeed );
}
// RNG of the integers 0-255: a fixed permutation of all byte values,
// used as the substitution table for the Pearson8() hash below
static BYTE g_dPearsonRNG[256] = {
	98,  6, 85,150, 36, 23,112,164,135,207,169,  5, 26, 64,165,219, //  1
	61, 20, 68, 89,130, 63, 52,102, 24,229,132,245, 80,216,195,115, //  2
	90,168,156,203,177,120,  2,190,188,  7,100,185,174,243,162, 10, //  3
	237, 18,253,225,  8,208,172,244,255,126,101, 79,145,235,228,121, //  4
	123,251, 67,250,161,  0,107, 97,241,111,181, 82,249, 33, 69, 55, //  5
	59,153, 29,  9,213,167, 84, 93, 30, 46, 94, 75,151,114, 73,222, //  6
	197, 96,210, 45, 16,227,248,202, 51,152,252,125, 81,206,215,186, //  7
	39,158,178,187,131,136,  1, 49, 50, 17,141, 91, 47,129, 60, 99, //  8
	154, 35, 86,171,105, 34, 38,200,147, 58, 77,118,173,246, 76,254, //  9
	133,232,196,144,198,124, 53,  4,108, 74,223,234,134,230,157,139, // 10
	189,205,199,128,176, 19,211,236,127,192,231, 70,233, 88,146, 44, // 11
	183,201, 22, 83, 13,214,116,109,159, 32, 95,226,140,220, 57, 12, // 12
	221, 31,209,182,143, 92,149,184,148, 62,113, 65, 37, 27,106,166, // 13
	3, 14,204, 72, 21, 41, 56, 66, 28,193, 40,217, 25, 54,179,117, // 14
	238, 87,240,155,180,170,242,212,191,163, 78,218,137,194,175,110, // 15
	43,119,224, 71,122,142, 42,160,104, 48,247,103, 15, 11,138,239 // 16
};
// 8-bit Pearson hash of a buffer: chain every input byte through the
// g_dPearsonRNG permutation table, XOR-ing with the running state
BYTE Pearson8 ( const BYTE * pBuf, int iLen )
{
	BYTE uState = 0;
	for ( int i = 0; i<iLen; ++i )
		uState = g_dPearsonRNG[ uState ^ pBuf[i] ];
	return uState;
}
// built-in cctz-style date formats tried by GetUTC(), ordered from the most
// specific to the least specific so that the longest match wins
static const char * g_dDateTimeFormats[] = {
	"%Y-%m-%dT%H:%M:%E*S%Z",
	"%Y-%m-%d'T'%H:%M:%S%Z",
	"%Y-%m-%dT%H:%M:%E*S",
	"%Y-%m-%dT%H:%M:%s",
	"%Y-%m-%dT%H:%M",
	"%Y-%m-%dT%H",
	"%Y-%m-%d",
	"%Y-%m",
	"%Y"
};
/// convert a textual date/time into a unix timestamp
/// sTime may be a plain numeric timestamp (all digits, 5+ symbols) which is
/// returned as-is, or a date string parsed either with the explicit pFormat
/// or, when pFormat is empty, with the built-in formats (longest first).
/// returns 0 on empty input or when nothing matched
int64_t GetUTC ( const CSphString & sTime, const char * pFormat )
{
	if ( sTime.IsEmpty() )
		return 0;

	const char * szCur = sTime.cstr();
	while ( isdigit ( (unsigned char)*szCur ) ) // cast: plain char may be negative, which is UB for isdigit
		szCur++;

	// should be timestamp with only numeric values and at least 5 symbols
	if ( !*szCur && (szCur-sTime.cstr())>4 )
		return (int64_t)strtoull ( sTime.cstr(), nullptr, 10 ); // 64-bit parse even where long is 32-bit

	time_t tConverted = 0;
	if ( pFormat && *pFormat )
	{
		if ( ParseAsLocalTime ( pFormat, sTime, tConverted ) )
			return tConverted;
	} else
	{
		// loop from the built-in formats from longest to shortest and try one by one
		for ( const char * pFmt : g_dDateTimeFormats )
			if ( ParseAsLocalTime ( pFmt, sTime, tConverted ) )
				return tConverted;
	}

	return 0;
}
/// operations supported inside a date-math expression (see ParseDateMath)
enum class DateMathOp_e
{
	Mod,	// '/' : round down to the unit boundary
	Add,	// '+' : add N units
	Sub,	// '-' : subtract N units
};
// unit-name -> unit lookup; holds both short date-math names ("m", "M", ...)
// and full histogram names ("minute", "month", ...)
typedef CSphOrderedHash<DateUnit_e, CSphString, CSphStrHashFunc, 32> DateMathUnitNames_t;
static void DoDateMath ( DateMathOp_e eOp, DateUnit_e eUnit, int iVal, time_t & tDateTime );
/// build the unit-name lookup table used by ParseDateMath / ParseDateInterval
static DateMathUnitNames_t InitMathUnits()
{
	typedef std::pair<const char *, DateUnit_e> NamedUnit_t;
	NamedUnit_t dUnits[] = {
		// date math names
		{"ms", DateUnit_e::ms }, {"s", DateUnit_e::sec}, {"m", DateUnit_e::minute}, {"h", DateUnit_e::hour}, {"d", DateUnit_e::day}, {"w", DateUnit_e::week}, {"M", DateUnit_e::month}, {"y", DateUnit_e::year},
		// histogram names
		{"minute", DateUnit_e::minute}, {"hour", DateUnit_e::hour}, {"day", DateUnit_e::day}, {"week", DateUnit_e::week}, {"month", DateUnit_e::month}, {"year", DateUnit_e::year}
	};
	DateMathUnitNames_t hRes;
	for ( const auto & tUnit : dUnits )
		hRes.Add ( tUnit.second, tUnit.first );
	return hRes;
}
// initialized once at startup, read-only afterwards
static DateMathUnitNames_t g_hDateMathUnits = InitMathUnits();
/// apply a chain of date-math operations ("+1d", "-2h", "/M", ...) to tDateTime
/// each step is <op><count><unit>; the count defaults to 1 when omitted.
/// returns false on any parse error
/// NOTE(review): the final `return tDateTime` converts the timestamp to bool,
/// so an expression that lands exactly on the epoch (0) reports failure —
/// looks intentional (0 doubles as the error value here), but worth confirming
static bool ParseDateMath ( const Str_t & sMathExpr, time_t & tDateTime )
{
	const char * sCur = sMathExpr.first;
	const char * sEnd = sCur + sMathExpr.second;
	while ( sCur<sEnd && *sCur )
	{
		// operation comes first: '/', '+' or '-'
		DateMathOp_e eOp;
		switch ( *sCur++ )
		{
		case '/' : eOp = DateMathOp_e::Mod; break;
		case '+' : eOp = DateMathOp_e::Add; break;
		case '-' : eOp = DateMathOp_e::Sub; break;
		default: return false;
		}
		// optional count; defaults to 1
		int iNum = 1;
		if ( !sphIsDigital ( *sCur ) )
		{
			iNum = 1;
		} else
		{
			char * sNumEnd = nullptr;
			iNum = (int64_t)strtoull ( sCur, &sNumEnd, 10 );
			sCur = sNumEnd;
		}
		// rounding is only allowed on whole, single, units (eg M or 1M, not 0.5M or 2M)
		if ( eOp==DateMathOp_e::Mod && iNum!=1 )
			return false;
		// the unit name follows: one or more alpha chars
		const char * sUnitStart = sCur++;
		while ( sCur<sEnd && sphIsAlphaOnly ( *sCur ) )
			sCur++;
		CSphString sUnit;
		sUnit.SetBinary ( sUnitStart, sCur - sUnitStart );
		DateUnit_e * pUnit = g_hDateMathUnits ( sUnit );
		if ( !pUnit )
			return false;
		DoDateMath ( eOp, *pUnit, iNum, tDateTime );
	}
	return tDateTime;
}
/// parse a full date-math expression into a timestamp
/// accepted forms: "now[<math>]" (relative to iNow) or "<date>||<math>" or a
/// bare "<date>" (parsed by GetUTC); <math> is handled by the overload above.
/// returns false on empty input or a malformed math chain
bool ParseDateMath ( const CSphString & sMathExpr, int iNow, time_t & tDateTime )
{
	if ( sMathExpr.IsEmpty() )
		return false;
	const char sNow[] = "now";
	Str_t sExpr = FromStr ( sMathExpr );
	if ( sMathExpr.Begins ( sNow ) )
	{
		// anchor at the supplied "now" and keep the rest as the math chain
		tDateTime = iNow;
		int iNowLen = sizeof ( sNow ) - 1;
		sExpr.first += iNowLen;
		sExpr.second -= iNowLen;
	} else
	{
		// anchor at an explicit date; "||" separates the date from the math chain
		CSphString sDateOnly;
		const char * sFullDateDel = strstr ( sMathExpr.cstr(), "||" );
		if ( !sFullDateDel )
		{
			sDateOnly = sMathExpr;
			sExpr = Str_t(); // nothing else
		} else
		{
			const int iDelimiterLen = 2;
			int iOff = sFullDateDel - sMathExpr.cstr();
			sDateOnly.SetBinary ( sMathExpr.cstr(), iOff );
			sExpr = Str_t ( sFullDateDel + iDelimiterLen, sMathExpr.Length() - iOff - iDelimiterLen );
		}
		// We're going to just require ISO8601 timestamps, k?
		tDateTime = GetUTC ( sDateOnly );
	}
	if ( IsEmpty ( sExpr ) )
		return true;
	return ParseDateMath ( sExpr, tDateTime );
}
/// parse a calendar interval name ("M", "1M", "week", ...) into a DateUnit_e
/// only whole, single units are accepted (eg. "M" or "1M", but not "2M");
/// returns DateUnit_e::total_units and fills sError on failure
DateUnit_e ParseDateInterval ( const CSphString & sExpr, CSphString & sError )
{
	const char * sCur = sExpr.cstr();
	const char * sEnd = sCur + sExpr.Length();

	// optional leading multiplier; defaults to 1 when absent
	int iNum = 1;
	if ( sphIsDigital ( *sCur ) )
	{
		char * sNumEnd = nullptr;
		iNum = (int64_t)strtoull ( sCur, &sNumEnd, 10 );
		sCur = sNumEnd;
	}

	// rounding is only allowed on whole, single, units (eg M or 1M, not 0.5M or 2M)
	if ( iNum!=1 )
	{
		sError.SetSprintf ( "The supplied interval [%s] could not be parsed as a calendar interval", sExpr.cstr() );
		return DateUnit_e::total_units;
	}

	// the remainder is the unit name: one or more alpha chars
	const char * sUnitStart = sCur++;
	while ( sCur<sEnd && sphIsAlphaOnly ( *sCur) )
		sCur++;

	CSphString sUnit;
	sUnit.SetBinary ( sUnitStart, sCur - sUnitStart );
	DateUnit_e * pUnit = g_hDateMathUnits ( sUnit );
	if ( !pUnit )
	{
		sError.SetSprintf ( "unknown interval [%s]", sExpr.cstr() );
		return DateUnit_e::total_units;
	}

	return *pUnit;
}
/// round tDateTime down to the start of the given calendar unit
/// (eg. day -> midnight, week -> preceding monday, month -> the 1st, ...)
void RoundDate ( DateUnit_e eUnit, time_t & tDateTime )
{
	// sub-second rounding is a no-op for a whole-second time_t
	if ( eUnit==DateUnit_e::ms )
		return;
	cctz::civil_second tSrcTime = ConvertTime ( tDateTime );
	switch ( eUnit )
	{
	case DateUnit_e::sec:
		// round-trip through all six fields; effectively keeps the timestamp
		tDateTime = ConvertTime ( cctz::civil_second ( tSrcTime.year(), tSrcTime.month(), tSrcTime.day(), tSrcTime.hour(), tSrcTime.minute(), tSrcTime.second() ) );
		break;
	case DateUnit_e::minute:
		// omitted trailing fields default to their minimum (cctz semantics)
		tDateTime = ConvertTime ( cctz::civil_second ( tSrcTime.year(), tSrcTime.month(), tSrcTime.day(), tSrcTime.hour(), tSrcTime.minute() ) );
		break;
	case DateUnit_e::hour:
		tDateTime = ConvertTime ( cctz::civil_second ( tSrcTime.year(), tSrcTime.month(), tSrcTime.day(), tSrcTime.hour() ) );
		break;
	case DateUnit_e::day:
		tDateTime = ConvertTime ( cctz::civil_second ( tSrcTime.year(), tSrcTime.month(), tSrcTime.day() ) );
		break;
	case DateUnit_e::week:
		{
			// weeks start on monday; step back unless already there
			cctz::civil_day tWeekStart ( tSrcTime.year(), tSrcTime.month(), tSrcTime.day() );
			if ( cctz::get_weekday ( tWeekStart )!=cctz::weekday::monday )
				tWeekStart = cctz::prev_weekday ( tWeekStart, cctz::weekday::monday );
			tDateTime = ConvertTime ( tWeekStart );
		}
		break;
	case DateUnit_e::month:
		tDateTime = ConvertTime ( cctz::civil_second ( tSrcTime.year(), tSrcTime.month() ) );
		break;
	case DateUnit_e::year:
		tDateTime = ConvertTime ( cctz::civil_second ( tSrcTime.year() ) );
		break;
	default:
		break;
	}
}
/// apply a single date-math step to tDateTime
/// Mod rounds down to the unit boundary; Add/Sub shift by iVal calendar units
/// (cctz normalizes out-of-range fields, so month/year arithmetic is calendar-correct)
void DoDateMath ( DateMathOp_e eOp, DateUnit_e eUnit, int iVal, time_t & tDateTime )
{
	if ( eOp==DateMathOp_e::Mod )
	{
		RoundDate ( eUnit, tDateTime );
		return;
	}
	if ( eOp==DateMathOp_e::Sub )
		iVal = -iVal;
	cctz::civil_second tSrcTime = ConvertTime ( tDateTime );
	switch ( eUnit )
	{
	case DateUnit_e::ms:
		{
			// whole seconds go through the calendar; the sub-second remainder is added directly
			// NOTE(review): the remainder (0..999 ms) is added to a whole-second time_t,
			// ie. it is effectively treated as seconds — confirm the intended units here
			int iMsLeft = iVal % 1000;
			int iSec = iVal / 1000;
			tDateTime = ConvertTime ( cctz::civil_second ( tSrcTime.year(), tSrcTime.month(), tSrcTime.day(), tSrcTime.hour(), tSrcTime.minute(), tSrcTime.second() + iSec ) );
			tDateTime += iMsLeft;
		}
		break;
	case DateUnit_e::sec:
		tDateTime = ConvertTime ( cctz::civil_second ( tSrcTime.year(), tSrcTime.month(), tSrcTime.day(), tSrcTime.hour(), tSrcTime.minute(), tSrcTime.second() + iVal ) );
		break;
	case DateUnit_e::minute:
		tDateTime = ConvertTime ( cctz::civil_second ( tSrcTime.year(), tSrcTime.month(), tSrcTime.day(), tSrcTime.hour(), tSrcTime.minute() + iVal, tSrcTime.second() ) );
		break;
	case DateUnit_e::hour:
		tDateTime = ConvertTime ( cctz::civil_second ( tSrcTime.year(), tSrcTime.month(), tSrcTime.day(), tSrcTime.hour() + iVal, tSrcTime.minute(), tSrcTime.second() ) );
		break;
	case DateUnit_e::day:
		tDateTime = ConvertTime ( cctz::civil_second ( tSrcTime.year(), tSrcTime.month(), tSrcTime.day() + iVal, tSrcTime.hour(), tSrcTime.minute(), tSrcTime.second() ) );
		break;
	case DateUnit_e::week:
		tDateTime = ConvertTime ( cctz::civil_second ( tSrcTime.year(), tSrcTime.month(), tSrcTime.day() + iVal*7, tSrcTime.hour(), tSrcTime.minute(), tSrcTime.second() ) );
		break;
	case DateUnit_e::month:
		tDateTime = ConvertTime ( cctz::civil_second ( tSrcTime.year(), tSrcTime.month() + iVal, tSrcTime.day(), tSrcTime.hour(), tSrcTime.minute(), tSrcTime.second() ) );
		break;
	case DateUnit_e::year:
		tDateTime = ConvertTime ( cctz::civil_second ( tSrcTime.year() + iVal, tSrcTime.month(), tSrcTime.day(), tSrcTime.hour(), tSrcTime.minute(), tSrcTime.second() ) );
		break;
	default:
		break;
	}
}
// monotonically increasing counter handing out local index ids
static std::atomic<long> g_tIndexId { 0 };
/// take the next free index id (thread-safe)
int64_t GenerateIndexId()
{
	return g_tIndexId.fetch_add ( 1, std::memory_order_relaxed );
}
/// reposition the counter, eg. after loading persisted state
void SetIndexId ( int64_t iId )
{
	g_tIndexId.store ( iId );
}
/// check whether a (possibly null) word contains any wildcard characters
bool HasWildcards ( const char * sWord )
{
	if ( !sWord )
		return false;
	for ( const char * p = sWord; *p; ++p )
		if ( sphIsWild ( *p ) )
			return true;
	return false;
}
// set of currently armed debug pause points, guarded by a rw-lock
static RwLock_t hBreaksProtect;
static SmallStringHash_T<bool> hBreaks GUARDED_BY ( hBreaksProtect );
// sleep on named pause. Put into interest clauses in the code where a race expected
// blocks (polling every 20ms) until the named pause point is released via PauseAt()
void PauseCheck ( const CSphString & sName )
{
	// re-checked under a read lock on every poll iteration
	auto fnCheck = [&sName] () {
		ScRL_t tProtect { hBreaksProtect };
		return hBreaks.Exists ( sName );
	};
	if ( !fnCheck () )
		return;
	sphInfo ( "Paused '%s'", sName.cstr () );
	auto tmStart = sphMicroTimer ();
	while ( fnCheck () )
		sphSleepMsec ( 20 );
	LogInfo ( "Released '%s' in %.3t", sName.cstr (), sphMicroTimer ()-tmStart );
}
// debug pause 'id' on / debug pause 'id' off
/// arm or release a named pause point; no-op when already in the requested state
void PauseAt ( const CSphString& sName, bool bPause )
{
	ScWL_t tProtect { hBreaksProtect };
	bool bKnown = hBreaks.Exists ( sName );
	if ( bPause )
	{
		if ( !bKnown )
			hBreaks.Add ( true, sName );
	} else if ( bKnown )
		hBreaks.Delete ( sName );
}
| 103,567
|
C++
|
.cpp
| 3,348
| 28.283751
| 201
| 0.636169
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,959
|
snippetfunctor.cpp
|
manticoresoftware_manticoresearch/src/snippetfunctor.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "snippetfunctor.h"
#include "sphinxexcerpt.h"
#include "sphinxsearch.h"
#include "snippetindex.h"
#include "snippetstream.h"
#include "snippetpassage.h"
#include "tokenizer/tokenizer.h"
//////////////////////////////////////////////////////////////////////////
#define UINT32_MASK 0xffffffffUL
/// one tokenized item inside a TokenSpan_c window
struct SpanToken_t
{
	DWORD			m_uQwordMask;		///< query words mask (bit per matched query term, 0 for plain tokens)
	int				m_iLengthCP;		///< token length in codepoints
	BYTE			m_iWordFlag;		///< token type (non-zero for word/stopword tokens)
};

/// a run of non-word bytes (whitespace etc.) inside the source document
struct Space_t
{
	int				m_iStartBytes;		///< offset from doc start
	int				m_iLengthBytes;		///< length in bytes
	int				m_iLengthCP;		///< length in codepoints
};
//////////////////////////////////////////////////////////////////////////
/// sliding window of consecutive document tokens, backed by a power-of-two
/// ring buffer; tracks aggregate word/query-word/codepoint counters for the
/// currently stored tokens
class TokenSpan_c
{
public:
	int		m_iStart;		///< starting token (document-wide index of the first stored token)
	int		m_iWords;		///< number of TOK_WORDS tokens
	int		m_iQwords;		///< number of words matching query
	int		m_iCodes;		///< total length in codepoints (cached)

	void	Init ( int nStoredTokens );
	void	Reset();
	void	ScaleUp();
	void	Add ( int iToken, bool bWordFlag, bool bQWord, int iTermIndex, int iLengthCP );
	int		GetNumTokens () const { return m_iNumTokens; }
	const SpanToken_t & GetToken ( int iToken ) const;
	void	RemoveStartingTokens ( int nTokens );

private:
	int		m_iStartIndex;	///< ring-buffer slot of the first stored token
	int		m_iNumTokens;	///< number of stored tokens
	int		m_iBufferBits;	///< log2(m_dTokens.GetLength())
	DWORD	m_uBufferMask;	///< capacity-1, for cheap wrap-around
	CSphTightVector<SpanToken_t> m_dTokens;
};
/// size the ring buffer (capacity is a power of two derived from nStoredTokens,
/// so that wrap-around is a single bitmask) and reset all counters
void TokenSpan_c::Init ( int nStoredTokens )
{
	m_iBufferBits = sphLog2 ( nStoredTokens-1 );
	DWORD uBufferSize = 1<<m_iBufferBits;
	m_uBufferMask = uBufferSize-1;
	m_dTokens.Resize(uBufferSize);
	Reset ();
}
/// empty the window: drop stored tokens and zero out all aggregate counters
void TokenSpan_c::Reset()
{
	// ring buffer state
	m_iStartIndex = 0;
	m_iNumTokens = 0;
	// aggregate counters
	m_iStart = -1;
	m_iWords = 0;
	m_iQwords = 0;
	m_iCodes = 0;
}
/// double the ring buffer capacity, copying stored tokens in logical order so
/// the enlarged buffer starts at slot 0
void TokenSpan_c::ScaleUp()
{
	// not enough preallocated tokens, realloc
	CSphTightVector<SpanToken_t> dNewTokens;
	dNewTokens.Resize ( m_dTokens.GetLength()*2 );
	// GetToken() resolves the old wrap-around, so the copy linearizes the data
	for ( int i=0; i<m_iNumTokens; i++ )
		dNewTokens[i] = GetToken(i);
	dNewTokens.SwapData ( m_dTokens );
	m_iBufferBits++;
	m_uBufferMask = ( 1<<m_iBufferBits )-1;
	m_iStartIndex = 0;
}
/// append a token to the window, growing the ring buffer on demand, and
/// update the aggregate word/query-word/codepoint counters
void TokenSpan_c::Add ( int iToken, bool bWordFlag, bool bQWord, int iTermIndex, int iLengthCP )
{
	bQWord &= bWordFlag; // only word tokens may count as query words
	if ( m_iNumTokens==m_dTokens.GetLength() )
		ScaleUp();
	int iEndIndex = ( m_iStartIndex + m_iNumTokens ) & m_uBufferMask;
	m_iNumTokens++;
	SpanToken_t & tToken = m_dTokens [ iEndIndex ];
	// one bit per query term; terms without an index get an empty mask
	tToken.m_uQwordMask = ( bQWord && ( iTermIndex>=0 ) ) ? ( 1<<iTermIndex ) : 0;
	tToken.m_iLengthCP = iLengthCP;
	tToken.m_iWordFlag = bWordFlag;
	m_iCodes += iLengthCP;
	m_iWords += bWordFlag;
	m_iQwords += bQWord;
	// remember the document-wide index of the very first token
	if ( m_iStart<0 )
		m_iStart = iToken;
}
/// fetch the iToken-th stored token (logical offset from the window start)
/// NOTE: the assert used to allow iToken==m_iNumTokens, which would read one
/// slot past the stored window (stale/uninitialized data); all callers pass a
/// strictly smaller index, so the bound is tightened here
const SpanToken_t & TokenSpan_c::GetToken ( int iToken ) const
{
	assert ( iToken>=0 && iToken<m_iNumTokens );
	int iIndex = ( m_iStartIndex + iToken ) & m_uBufferMask;
	return m_dTokens[iIndex];
}
/// drop nTokens tokens from the front of the window by advancing the ring start
/// NOTE: only the storage bookkeeping is updated here; the aggregate counters
/// (m_iWords, m_iCodes, ...) are maintained by the caller (see ShrinkSpanHead)
void TokenSpan_c::RemoveStartingTokens ( int nTokens )
{
	assert ( nTokens<m_iNumTokens );
	m_iStartIndex = ( m_iStartIndex + nTokens ) & m_uBufferMask;
	m_iNumTokens -= nTokens;
}
//////////////////////////////////////////////////////////////////////////
/// check whether a document token falls inside a query hit span
static bool IsTokenHit ( const TokenInfo_t & tTok, DWORD uHitPos, int iHitSpan, int iField )
{
	DWORD uTokenPos = HITMAN::Create ( iField, tTok.m_uPosition );
	// single-position token: plain range check against the hit span
	if ( !tTok.m_iMultiPosLen )
		return ( uTokenPos>=uHitPos && uTokenPos<=uHitPos + iHitSpan );

	// 1d segments intersection
	// token.pos + token.len vs hit.pos + hit.spanLen
	return ( ( (int)(Min ( uTokenPos + tTok.m_iMultiPosLen, uHitPos + iHitSpan )) - (int)(Max ( uTokenPos, uHitPos )) )>=0 );
}
/// split a non-word region [iStart, iStart+iLen) of the document into runs of
/// uniform characters (space vs non-space), additionally breaking at the
/// optional iBoundary offset; results replace the contents of dSpaces
static void SplitSpaceIntoTokens ( CSphVector<Space_t> & dSpaces, const char * pDoc, int iStart, int iLen, int iBoundary = -1 )
{
	// most frequent case
	if ( sphIsSpace ( pDoc[iStart] ) && iLen==1 )
	{
		dSpaces.Resize(1);
		dSpaces[0].m_iStartBytes = iStart;
		dSpaces[0].m_iLengthBytes = 1;
		dSpaces[0].m_iLengthCP = 1;
		return;
	}

	dSpaces.Resize(0);

	int iGapStart = iStart;
	bool bWasSpace = sphIsSpace ( *(pDoc+iStart) );
	bool bWasBoundary = iBoundary==iStart;

	for ( int i=iStart; i<iStart+iLen; i++ )
	{
		bool bSpace = sphIsSpace ( *(pDoc+i) );
		bool bBoundary = i==iBoundary;
		// close the current run whenever the character class flips or we cross the boundary
		if ( bSpace!=bWasSpace || bBoundary!=bWasBoundary )
		{
			Space_t & tLastSpace = dSpaces.Add();
			tLastSpace.m_iStartBytes = iGapStart;
			tLastSpace.m_iLengthBytes = i-iGapStart;
			tLastSpace.m_iLengthCP = sphUTF8Len ( pDoc+tLastSpace.m_iStartBytes, tLastSpace.m_iLengthBytes );

			bWasSpace = bSpace;
			bWasBoundary = bBoundary;
			iGapStart = i;
		}
	}

	// flush the trailing run, if any
	if ( iGapStart < iStart+iLen )
	{
		Space_t & tLastSpace = dSpaces.Add();
		tLastSpace.m_iStartBytes = iGapStart;
		tLastSpace.m_iLengthBytes = iStart+iLen-iGapStart;
		tLastSpace.m_iLengthCP = sphUTF8Len ( pDoc+tLastSpace.m_iStartBytes, tLastSpace.m_iLengthBytes );
	}
}
//////////////////////////////////////////////////////////////////////////
/// document token processor functor traits
class TokenFunctorTraits_c : public TokenFunctor_i
{
protected:
SnippetResult_t & m_tResult;
const SnippetQuerySettings_t & m_tQuery;
const CSphIndexSettings & m_tIndexSettings;
CSphVector<BYTE> & m_dResult;
TokenizerRefPtr_c m_pTokenizer;
const char * m_szDocBuffer = nullptr;
const char * m_pDoc = nullptr;
const char * m_pDocMax = nullptr;
int m_iDocLen = 0;
int m_iSeparatorLen;
int m_iField = 0;
TokenFunctorTraits_c ( TokenizerRefPtr_c pTokenizer, const SnippetQuerySettings_t & tQuery, const CSphIndexSettings & tIndexSettings, const char * szDoc, int iDocLen, int iField, SnippetResult_t & tRes );
void ResultEmit ( CSphVector<BYTE> & dBuf, const char * pSrc, int iLen, bool bHasPassageMacro=false, int iPassageId=0, const char * pPost=nullptr, int iPostLen=0 ) const;
void EmitPassageSeparator ( CSphVector<BYTE> & dBuf );
};
/// bind the functor to a document buffer: points the tokenizer at szDoc and
/// caches its window bounds plus the passage separator length
TokenFunctorTraits_c::TokenFunctorTraits_c ( TokenizerRefPtr_c pTokenizer, const SnippetQuerySettings_t & tQuery, const CSphIndexSettings & tIndexSettings, const char * szDoc, int iDocLen, int iField, SnippetResult_t & tRes )
	: m_tResult ( tRes )
	, m_tQuery ( tQuery )
	, m_tIndexSettings ( tIndexSettings )
	, m_dResult ( m_tResult.m_dFields[iField].m_dPassages[0].m_dText )
	, m_pTokenizer ( std::move (pTokenizer) )
	, m_szDocBuffer ( szDoc )
	, m_iDocLen ( iDocLen )
	, m_iField ( iField )
{
	assert(m_pTokenizer);
	// the tokenizer does not modify the buffer; the cast only satisfies its API
	m_pTokenizer->SetBuffer ( (BYTE*)const_cast<char*>(szDoc), m_iDocLen );
	m_pDoc = m_pTokenizer->GetBufferPtr();
	m_pDocMax = m_pTokenizer->GetBufferEnd();

	m_iSeparatorLen = m_tQuery.m_sChunkSeparator.Length();
}
/// append a chunk of text to the output buffer; when the chunk carries a
/// passage macro, the numeric passage id and the post-macro tail follow it
void TokenFunctorTraits_c::ResultEmit ( CSphVector<BYTE> & dBuf, const char * pSrc, int iLen, bool bHasPassageMacro, int iPassageId, const char * pPost, int iPostLen ) const
{
	dBuf.Append ( pSrc, iLen );
	if ( !bHasPassageMacro )
		return;

	// substitute the %PASSAGE_ID% macro with the actual decimal id
	char sBuf[16];
	int iPassLen = snprintf ( sBuf, sizeof(sBuf), "%d", iPassageId );
	dBuf.Append ( sBuf, iPassLen );
	dBuf.Append ( pPost, iPostLen );
}

/// append the configured passage separator to the output buffer
void TokenFunctorTraits_c::EmitPassageSeparator ( CSphVector<BYTE> & dBuf )
{
	ResultEmit ( dBuf, m_tQuery.m_sChunkSeparator.cstr(), m_iSeparatorLen );
}
//////////////////////////////////////////////////////////////////////////
/// cursor over a sorted list of query hit marks; functors mix this in to walk
/// hits in document order alongside the token stream
class HitTraits_c
{
protected:
	const SphHitMark_t * m_pHit = nullptr;		///< current hit
	const SphHitMark_t * m_pHitEnd = nullptr;	///< one past the last hit

	HitTraits_c ( const CSphVector<SphHitMark_t> & dHits );
	void RewindHits ( DWORD uTokPos, int iField );
};


HitTraits_c::HitTraits_c ( const CSphVector<SphHitMark_t> & dHits )
	: m_pHit ( dHits.Begin() )
	, m_pHitEnd ( dHits.Begin() + dHits.GetLength() )
{}

/// advance the cursor past every hit that ends before the given token position
void HitTraits_c::RewindHits ( DWORD uTokPos, int iField )
{
	while ( m_pHit<m_pHitEnd && m_pHit->m_uPosition+m_pHit->m_uSpan<=HITMAN::Create ( iField, uTokPos ) )
		m_pHit++;
}
//////////////////////////////////////////////////////////////////////////
/// functor that highlights the start of the document
/// copies tokens from the document start into the result until the snippet
/// codepoint limit is reached (used when no query words matched and empty
/// snippets are not allowed)
class DocStartHighlighter_c : public TokenFunctorTraits_c
{
public:
			DocStartHighlighter_c ( TokenizerRefPtr_c pTokenizer, const SnippetQuerySettings_t & tQuery, const SnippetLimits_t & tLimits, const CSphIndexSettings & tIndexSettings, const char * szDoc,
				int iDocLen, int iField, int & iResultCP, SnippetResult_t & tRes );

	bool	OnToken ( const TokenInfo_t & tTok, const CSphVector<SphWordID_t> &, const CSphVector<int> * ) final;
	bool	OnOverlap ( int iStart, int iLen, int ) final;
	void	OnTail ( int iStart, int iLen, int ) final;
	void	OnSkipHtml ( int, int ) final;
	void	OnSPZ ( BYTE, DWORD, const char *, int ) final {}
	void	OnFinish () final {}

private:
	const SnippetLimits_t &	m_tLimits;
	bool			m_bCollectionStopped = false;	///< set once the CP limit is hit
	int &			m_iResultLenCP;					///< shared running codepoint total
	CSphVector<Space_t>	m_dSpaces;					///< scratch buffer for space runs

	void	CollectStartTokens ( int iStart, int iLen, int iLenghCP = -1 );
	void	CollectStartSpaces();
};


DocStartHighlighter_c::DocStartHighlighter_c ( TokenizerRefPtr_c pTokenizer, const SnippetQuerySettings_t & tQuery, const SnippetLimits_t & tLimits, const CSphIndexSettings & tIndexSettings, const char * szDoc,
	int iDocLen, int iField, int & iResultCP, SnippetResult_t & tRes )
	: TokenFunctorTraits_c ( std::move ( pTokenizer ), tQuery, tIndexSettings, szDoc, iDocLen, iField, tRes )
	, m_tLimits ( tLimits )
	, m_iResultLenCP ( iResultCP )
{
	// this functor only runs when empty snippets are disallowed
	assert ( !m_tQuery.m_bAllowEmpty );
}
/// word token: copy it; returning false stops the token stream once the limit is hit
bool DocStartHighlighter_c::OnToken ( const TokenInfo_t & tTok, const CSphVector<SphWordID_t> &, const CSphVector<int> * )
{
	CollectStartTokens ( tTok.m_iStart, tTok.m_iLen );
	return !m_bCollectionStopped;
}

/// inter-token region: split into space runs and copy them
bool DocStartHighlighter_c::OnOverlap ( int iStart, int iLen, int )
{
	SplitSpaceIntoTokens ( m_dSpaces, m_pDoc, iStart, iLen );
	CollectStartSpaces();
	return !m_bCollectionStopped;
}

/// trailing document region after the last token
void DocStartHighlighter_c::OnTail ( int iStart, int iLen, int )
{
	SplitSpaceIntoTokens ( m_dSpaces, m_pDoc, iStart, iLen );
	CollectStartSpaces();
}

/// stripped html markup: copied verbatim (CP length computed on demand)
void DocStartHighlighter_c::OnSkipHtml ( int iStart, int iLen )
{
	assert ( m_pDoc );
	assert ( iStart>=0 && m_pDoc+iStart+iLen<=m_pDocMax );
	CollectStartTokens ( iStart, iLen, -1 );
}
/// append a document chunk to the snippet unless that would exceed the CP
/// limit; iLenghCP of -1 means "compute the codepoint length here".
/// the very first chunk is always emitted so the snippet is never empty;
/// once the limit is exceeded, a separator is emitted and collection stops
void DocStartHighlighter_c::CollectStartTokens ( int iStart, int iLen, int iLenghCP )
{
	if ( m_tQuery.m_bAllowEmpty || m_bCollectionStopped )
		return;

	bool bLengthOk = true;
	int iCalcLengthCP = 0;
	if ( m_tLimits.m_iLimit>0 )
	{
		iCalcLengthCP = iLenghCP;
		if ( iCalcLengthCP==-1 )
			iCalcLengthCP = sphUTF8Len ( m_pDoc+iStart, iLen );

		bLengthOk = m_iResultLenCP+iCalcLengthCP<=m_tLimits.m_iLimit;
	}

	if ( bLengthOk || !m_dResult.GetLength() )
	{
		ResultEmit ( m_dResult, m_pDoc+iStart, iLen );
		m_iResultLenCP += iCalcLengthCP;
	}

	if ( !bLengthOk )
	{
		EmitPassageSeparator ( m_dResult );
		m_bCollectionStopped = true;
	}
}
/// emit the previously split space runs in order, stopping as soon as the
/// codepoint limit is reached
void DocStartHighlighter_c::CollectStartSpaces()
{
	if ( m_tQuery.m_bAllowEmpty || m_bCollectionStopped )
		return;

	ARRAY_FOREACH_COND ( i, m_dSpaces, !m_bCollectionStopped )
	{
		Space_t & tSpace = m_dSpaces[i];
		CollectStartTokens ( tSpace.m_iStartBytes, tSpace.m_iLengthBytes, tSpace.m_iLengthCP );
	}
}
//////////////////////////////////////////////////////////////////////////
/// functor that extracts passages for further highlighting
/// functor that extracts passages for further highlighting
/// slides a token window over the document, weighting candidate passages and
/// keeping the best ones in the shared PassageContext_t
class PassageExtractor_c : public TokenFunctorTraits_c, public HitTraits_c
{
public:
	PassageExtractor_c ( const SnippetsDocIndex_c & tContainer, PassageContext_t & tPassageContext, TokenizerRefPtr_c pTokenizer, const SnippetQuerySettings_t & tQuery, const SnippetLimits_t & tLimits,
		const CSphIndexSettings & tIndexSettings, const char * szDoc, int iDocLen, const CSphVector<SphHitMark_t> & dHits, int iField, SnippetResult_t & tRes );

protected:
	bool	OnToken ( const TokenInfo_t & tTok, const CSphVector<SphWordID_t> & dTokens, const CSphVector<int> * ) final;
	bool	OnOverlap ( int iStart, int iLen, int iBoundary ) final;
	void	OnSkipHtml ( int iStart, int iLen ) final {}
	void	OnSPZ ( BYTE iSPZ, DWORD uPosition, const char * szZone, int iZone ) final;
	void	OnTail ( int iStart, int iLen, int iBoundary ) final;
	void	OnFinish() final;

private:
	/// window-filling state machine
	enum State_e
	{
		STATE_WINDOW_SETUP,		///< growing the initial window up to the limits
		STATE_ADD_WORD			///< sliding: add a token, shrink from the head
	};

	State_e					m_eState = STATE_WINDOW_SETUP;
	bool					m_bQwordsChanged = true;		///< passage weight needs a full recalc
	bool					m_bAppendSentenceEnd = false;	///< postponed sentence-end SPZ
	const SnippetsDocIndex_c & m_tContainer;
	SnippetLimits_t			m_tLimits;
	TokenSpan_c				m_tSpan;						///< current token window
	Passage_t				m_tPass;						///< current candidate passage
	int						m_iCurToken = 0;				///< running document token counter
	CSphVector<Space_t>		m_dSpaces;						///< scratch buffer for space runs
	CSphVector<BYTE>		m_dStartResult;
	int						m_iThresh = 0;					///< passage count after which acceptance gets picky
	PassageContext_t &		m_tContext;						///< shared best-passage accumulator

	void	AddSpaces ( int iBoundary );
	void	WeightAndSubmit();
	void	UpdateGaps ( int iMaxWords );
	void	FlushPassage();
	void	UpdateTopPassages ( int iQword, int iWeight );
	void	ShrinkSpanHead();
	void	CalcPassageWeight ( int iMaxWords );
	void	AppendBeforeAfterTokens ( Passage_t & tPassage, const TokenSpan_c & tSpan );
	int		GetSpanWordsLimit() const;
};
/// size the token window from the snippet limits and derive the passage-count
/// threshold (a power of two) used to throttle candidate acceptance
PassageExtractor_c::PassageExtractor_c ( const SnippetsDocIndex_c & tContainer, PassageContext_t & tPassageContext, TokenizerRefPtr_c pTokenizer, const SnippetQuerySettings_t & tQuery, const SnippetLimits_t & tLimits,
	const CSphIndexSettings & tIndexSettings, const char * szDoc, int iDocLen, const CSphVector<SphHitMark_t> & dHits, int iField, SnippetResult_t & tRes )
	: TokenFunctorTraits_c ( std::move (pTokenizer), tQuery, tIndexSettings, szDoc, iDocLen, iField, tRes )
	, HitTraits_c ( dHits )
	, m_tContainer ( tContainer )
	, m_tLimits ( tLimits )
	, m_tContext ( tPassageContext )
{
	// estimate the window capacity in tokens from the codepoint/word limits
	const int AVG_WORD_LEN = 5;
	int iSpanSize = m_tLimits.m_iLimit ? 8*m_tLimits.m_iLimit/AVG_WORD_LEN : 2*m_tLimits.m_iLimitWords;
	if ( !iSpanSize )
		iSpanSize = 128;

	m_tSpan.Init ( iSpanSize );
	m_tPass.Reset();

	if ( m_tLimits.m_iLimitPassages>0 )
		m_iThresh = m_tLimits.m_iLimitPassages;
	else if ( m_tLimits.m_iLimitWords>0 )
		m_iThresh = m_tLimits.m_iLimitWords / 2;
	else if ( m_tLimits.m_iLimit>0 )
		m_iThresh = m_tLimits.m_iLimit / 4;

	// rounded to a power of two so UpdateTopPassages can use it as a mask
	m_iThresh = 1 << sphLog2 ( m_iThresh );
}
/// feed one document token through the window state machine:
/// in WINDOW_SETUP the window grows until the CP/word limits are hit (then the
/// current window is weighted+submitted and we switch to ADD_WORD); in
/// ADD_WORD the window slides, re-weighting after every word token
bool PassageExtractor_c::OnToken ( const TokenInfo_t & tTok, const CSphVector<SphWordID_t> & dTokens, const CSphVector<int> * )
{
	assert ( m_pDoc );
	assert ( tTok.m_iStart>=0 && m_pDoc+tTok.m_iStart+tTok.m_iLen<=m_pDocMax );

	// decide whether this token lands inside a query hit
	bool bQWord = false;
	int iTermIndex = -1;
	RewindHits ( tTok.m_uPosition, m_iField );
	if ( m_pHit<m_pHitEnd && IsTokenHit ( tTok, m_pHit->m_uPosition, m_pHit->m_uSpan, m_iField ) )
		bQWord = true;

	iTermIndex = tTok.m_iTermIndex;
	int iLengthCP = sphUTF8Len ( m_pDoc+tTok.m_iStart, tTok.m_iLen );

	switch ( m_eState )
	{
	case STATE_WINDOW_SETUP:
	{
		DWORD uAllQwords = ( 1 << m_tContainer.GetNumTerms() )-1;
		const int iCpLimit = m_tLimits.m_iLimit ? m_tLimits.m_iLimit : INT_MAX;
		bool bLimitsOk = m_tSpan.m_iCodes+iLengthCP<=iCpLimit && m_tSpan.m_iWords<=GetSpanWordsLimit();

		// with force_all_words, keep growing past the limits until every query word is in
		if ( ( m_tQuery.m_bForceAllWords && m_tPass.m_uQwords==uAllQwords && !bLimitsOk ) ||
			( !m_tQuery.m_bForceAllWords && !bLimitsOk ) )
		{
			m_bQwordsChanged = true;
			WeightAndSubmit ();
			m_eState = STATE_ADD_WORD;
		}

		m_tSpan.Add ( m_iCurToken, tTok.m_bWord || tTok.m_bStopWord, bQWord, iTermIndex, iLengthCP );
		m_bQwordsChanged |= bQWord;
		// may have just switched state; a word token then triggers a slide+reweight
		if ( m_eState==STATE_ADD_WORD && (tTok.m_bWord || tTok.m_bStopWord) )
		{
			m_bQwordsChanged |= bQWord;
			ShrinkSpanHead();
			WeightAndSubmit();
		}
	}
	break;

	case STATE_ADD_WORD:
		m_tSpan.Add ( m_iCurToken, tTok.m_bWord || tTok.m_bStopWord, bQWord, iTermIndex, iLengthCP );

		if ( tTok.m_bWord || tTok.m_bStopWord )
		{
			m_bQwordsChanged |= bQWord;
			ShrinkSpanHead();
			WeightAndSubmit();
		}
		break;
	}

	m_iCurToken++;
	return true;
}
/// end of the token stream: a window still in the setup phase was never
/// submitted, so weight and submit it now; then clear the candidate passage
void PassageExtractor_c::OnFinish ()
{
	if ( m_eState==STATE_WINDOW_SETUP )
		WeightAndSubmit();

	m_tPass.Reset();
}
/// inter-token region: fast path for a single space while sliding, otherwise
/// split it into uniform runs and feed them through AddSpaces()
bool PassageExtractor_c::OnOverlap ( int iStart, int iLen, int iBoundary )
{
	// most frequent case
	if ( sphIsSpace ( m_pDoc[iStart] ) && iLen==1 && iBoundary<0 && m_eState==STATE_ADD_WORD )
	{
		m_tSpan.Add ( m_iCurToken++, false, false, -1, 1 );
		// a postponed sentence break is flushed after the following space
		if ( m_bAppendSentenceEnd )
			OnSPZ ( MAGIC_CODE_SENTENCE, 0, NULL, -1 );
	} else
	{
		SplitSpaceIntoTokens ( m_dSpaces, m_pDoc, iStart, iLen, iBoundary );
		AddSpaces ( iBoundary );
	}

	return true;
}
/// sentence/paragraph/zone boundary: submit whatever is in the window and
/// start a fresh one. A sentence end is postponed once (m_bAppendSentenceEnd)
/// so the trailing space stays attached to the finished sentence
void PassageExtractor_c::OnSPZ ( BYTE iSPZ, DWORD uPosition, const char * szZone, int iZone )
{
	if ( m_bAppendSentenceEnd )
		m_bAppendSentenceEnd = false;
	else if ( iSPZ==MAGIC_CODE_SENTENCE )
	{
		m_bAppendSentenceEnd = true;
		return;
	}

	switch ( m_eState )
	{
	case STATE_WINDOW_SETUP:
		WeightAndSubmit();
		break;

	case STATE_ADD_WORD:
		m_bQwordsChanged = true;
		WeightAndSubmit();
		m_eState = STATE_WINDOW_SETUP;
		break;
	}

	// no passage may straddle a boundary, so drop the window entirely
	m_bQwordsChanged = true;
	m_tSpan.Reset();
}
/// trailing document region: add the remaining space runs, then do a final
/// shrink+submit of the window
void PassageExtractor_c::OnTail ( int iStart, int iLen, int iBoundary )
{
	SplitSpaceIntoTokens ( m_dSpaces, m_pDoc, iStart, iLen, iBoundary );
	AddSpaces ( iBoundary );
	ShrinkSpanHead();
	WeightAndSubmit();
}
/// feed the previously split space runs into the window, honoring the same
/// state machine as OnToken(); a run starting exactly at iBoundary forces a
/// submit+reset since passages may not cross boundaries
void PassageExtractor_c::AddSpaces ( int iBoundary )
{
	switch ( m_eState )
	{
	case STATE_WINDOW_SETUP:
	{
		DWORD uAllQwords = ( 1 << m_tContainer.GetNumTerms() )-1;
		const int iCpLimit = m_tLimits.m_iLimit ? m_tLimits.m_iLimit : INT_MAX;
		ARRAY_FOREACH ( i, m_dSpaces )
		{
			bool bBoundary = iBoundary==m_dSpaces[i].m_iStartBytes;
			bool bLimitsOk = m_tSpan.m_iCodes+m_dSpaces[i].m_iLengthCP<=iCpLimit && m_tSpan.m_iWords<=GetSpanWordsLimit();
			// with force_all_words the limits are waived until every query word is in
			if ( m_tQuery.m_bForceAllWords && m_tPass.m_uQwords!=uAllQwords )
				bLimitsOk = true;

			if ( ( bBoundary || !bLimitsOk ) && m_eState!=STATE_ADD_WORD )
			{
				if ( bBoundary )
				{
					// boundary: close the window including this space run
					m_tSpan.Add ( m_iCurToken++, false, false, -1, m_dSpaces[i].m_iLengthCP );
					WeightAndSubmit();
					m_tSpan.Reset();

				} else
				{
					// limits exceeded: submit and switch to the sliding phase
					WeightAndSubmit();
					m_eState = STATE_ADD_WORD;
				}
			}

			if ( !bBoundary )
				m_tSpan.Add ( m_iCurToken++, false, false, -1, m_dSpaces[i].m_iLengthCP );

			if ( m_bAppendSentenceEnd )
				OnSPZ ( MAGIC_CODE_SENTENCE, 0, NULL, -1 );
		}
	}
	break;

	case STATE_ADD_WORD:
		ARRAY_FOREACH ( i, m_dSpaces )
		{
			m_tSpan.Add ( m_iCurToken++, false, false, -1, m_dSpaces[i].m_iLengthCP );

			if ( iBoundary==m_dSpaces[i].m_iStartBytes )
			{
				// boundary while sliding: submit and restart window setup
				WeightAndSubmit ();
				m_tSpan.Reset();
				m_eState = STATE_WINDOW_SETUP;
			}
			// flush a postponed sentence break right after the first space run
			if ( !i && m_bAppendSentenceEnd )
				OnSPZ ( MAGIC_CODE_SENTENCE, 0, NULL, -1 );
		}
		break;
	}
}
/// weight the current window (full recalc when the query-word set changed,
/// cheap gap update otherwise) and flush it as a candidate passage if it
/// actually contains query words
void PassageExtractor_c::WeightAndSubmit()
{
	if ( m_tSpan.m_iQwords )
	{
		if ( m_bQwordsChanged )
		{
			CalcPassageWeight ( GetSpanWordsLimit() );
			m_bQwordsChanged = false;
		} else
			UpdateGaps ( GetSpanWordsLimit() );

		if ( m_tPass.m_uQwords )
			FlushPassage();
	}
}
/// cheap partial re-weight: recompute only the gap metrics (min distance of a
/// query word from either window edge) and the around-before/after word counts
void PassageExtractor_c::UpdateGaps ( int iMaxWords )
{
	m_tPass.m_iMinGap = iMaxWords-1;
	m_tPass.m_iAroundBefore = m_tPass.m_iAroundAfter = 0;
	DWORD uQwords = 0;
	int iWord = -1;
	for ( int i = 0; i < m_tSpan.GetNumTokens(); i++ )
	{
		const SpanToken_t & tTok = m_tSpan.GetToken(i);
		if ( !tTok.m_iWordFlag )
			continue;

		iWord++;
		if ( tTok.m_uQwordMask )
		{
			// distance from the nearest window edge, whichever is smaller
			m_tPass.m_iMinGap = Min ( m_tPass.m_iMinGap, iWord );
			m_tPass.m_iMinGap = Min ( m_tPass.m_iMinGap, m_tSpan.m_iWords-1-iWord );
		}

		uQwords |= tTok.m_uQwordMask;
		// words seen before the first query word / after the last one
		m_tPass.m_iAroundBefore += ( uQwords==0 );
		m_tPass.m_iAroundAfter = ( tTok.m_uQwordMask ? 0 : m_tPass.m_iAroundAfter+1 );
	}
	assert ( m_tPass.m_iMinGap>=0 );
}
/// turn the current window into a candidate passage: trim excess context
/// around the query words, then either replace an overlapping previous
/// candidate, or append (subject to the pickiness threshold)
void PassageExtractor_c::FlushPassage()
{
	m_tPass.m_iField = m_iField;
	m_tPass.m_iStart = m_tSpan.m_iStart;
	m_tPass.m_iTokens = m_tSpan.GetNumTokens();
	m_tPass.m_iCodes = m_tSpan.m_iCodes;
	m_tPass.m_iWords = m_tSpan.m_iWords;

	// drop leading context words beyond the configured "around" count
	int iBefore = 0;
	while ( m_tPass.m_iAroundBefore>m_tQuery.m_iAround )
	{
		assert ( m_tPass.m_iStart<m_tPass.m_iStartLimit );
		const SpanToken_t & tTok = m_tSpan.GetToken ( iBefore );
		assert ( tTok.m_uQwordMask==0 );

		m_tPass.m_iCodes -= tTok.m_iLengthCP;
		m_tPass.m_iAroundBefore -= tTok.m_iWordFlag;
		m_tPass.m_iStart++;
		m_tPass.m_iTokens--;
		m_tPass.m_iWords -= tTok.m_iWordFlag;
		iBefore++;
	}

	// drop trailing context words beyond the configured "around" count
	int iAfter = m_tSpan.GetNumTokens()-1;
	while ( m_tPass.m_iAroundAfter>m_tQuery.m_iAround )
	{
		assert ( m_tPass.m_iEndLimit<m_tPass.m_iStart+m_tPass.m_iTokens-1 );
		const SpanToken_t & tTok = m_tSpan.GetToken ( iAfter );
		assert ( tTok.m_uQwordMask==0 );

		m_tPass.m_iCodes -= tTok.m_iLengthCP;
		m_tPass.m_iAroundAfter -= tTok.m_iWordFlag;
		m_tPass.m_iTokens--;
		m_tPass.m_iWords -= tTok.m_iWordFlag;
		iAfter--;
	}

	// if it's the very first one, do add
	if ( !m_tContext.m_dPassages.GetLength() )
	{
		Passage_t & tPassage = m_tContext.m_dPassages.Add();
		tPassage.CopyData ( m_tPass );
		AppendBeforeAfterTokens ( tPassage, m_tSpan );
		// single-keyword passages are tracked per keyword (sphLog2 recovers the term index from the mask)
		UpdateTopPassages ( m_tPass.m_iQwordCount==1 ? sphLog2 ( m_tPass.m_uQwords )-1 : -1, m_tPass.GetWeight() );
		return;
	}

	// check if it's new or better than the last one
	Passage_t & tLast = m_tContext.m_dPassages.Last();
	if ( ( m_tPass.m_iStartLimit<=tLast.m_iStartLimit && tLast.m_iEndLimit<=m_tPass.m_iEndLimit )
		|| ( tLast.m_iStartLimit<=m_tPass.m_iStartLimit && m_tPass.m_iEndLimit<=tLast.m_iEndLimit ) )
	{
		// overlapping passages, check which one is better centered
		int iPassPre = m_tPass.m_iStartLimit - m_tPass.m_iStart + 1;
		int iPassPost = m_tPass.m_iStart + m_tPass.m_iTokens - m_tPass.m_iEndLimit + 1;
		float fPassGap = (float)Max ( iPassPre, iPassPost ) / (float)Min ( iPassPre, iPassPost );

		int iLastPre = tLast.m_iStartLimit - tLast.m_iStart + 1;
		int iLastPost = tLast.m_iStart + tLast.m_iTokens - tLast.m_iEndLimit + 1;
		float fLastGap = (float)Max ( iLastPre, iLastPost ) / (float)Min ( iLastPre, iLastPost );

		int iWeightLast = tLast.GetWeight();
		int iWeightPass = m_tPass.GetWeight();
		// centered snippet wins last passage
		if ( tLast.m_iUniqQwords<=m_tPass.m_iUniqQwords &&
			( iWeightLast<iWeightPass || ( iWeightLast==iWeightPass && fPassGap<fLastGap ) ) )
		{
			tLast.CopyData ( m_tPass );
			AppendBeforeAfterTokens ( tLast, m_tSpan );
		}
		return;
	}

	// after a certain threshold, start being picky
	// only accept passages with new keywords, or big enough weight
	int iWeight = m_tPass.GetWeight();
	int iQword = -1;
	if ( m_tPass.m_iQwordCount==1 )
		iQword = sphLog2 ( m_tPass.m_uQwords )-1;

	// (single-pass "loop": each break accepts the passage, each return rejects it)
	while ( m_tContext.m_dPassages.GetLength()>m_iThresh )
	{
		// completely new keyword? accept
		if ( m_tPass.m_uQwords & ~m_tContext.m_uPassagesQwords )
			break;

		// single keyword passage? accept if better weight, otherwise reject
		if ( iQword>=0 )
		{
			if ( iWeight<=m_tContext.m_dQwordWeights[iQword] )
				return;
			break;
		}

		// multi-keyword passage? accept if weight within top-N
		assert ( iQword<0 );
		if ( iWeight<=m_tContext.m_dTopPassageWeights[m_iThresh] )
			return;
		break;
	}

	// kill them all, god will know his own
	Passage_t & tPassage = m_tContext.m_dPassages.Add();
	tPassage.CopyData ( m_tPass );
	AppendBeforeAfterTokens ( tPassage, m_tSpan );
	UpdateTopPassages ( iQword, iWeight );
}
/// account an accepted passage in the shared context: merge its keyword mask,
/// track the per-keyword best weight, and keep the weight list roughly sorted
/// (re-sorted every m_iThresh additions; m_iThresh is a power of two)
void PassageExtractor_c::UpdateTopPassages ( int iQword, int iWeight )
{
	m_tContext.m_uPassagesQwords |= m_tPass.m_uQwords;
	if ( iQword>=0 )
		m_tContext.m_dQwordWeights[iQword] = Max ( m_tContext.m_dQwordWeights[iQword], iWeight );

	m_tContext.m_dTopPassageWeights.Add ( iWeight );
	if ( ( m_tContext.m_dTopPassageWeights.GetLength() & ( m_iThresh-1 ) )==0 )
		m_tContext.m_dTopPassageWeights.RSort();
}
void PassageExtractor_c::ShrinkSpanHead()
{
	// trim tokens off the front of the current span until it fits into both the
	// codepoint and the word limits, keeping the span counters consistent
	const int iCpLimit = m_tLimits.m_iLimit ? m_tLimits.m_iLimit : INT_MAX; // 0 means "no CP limit"
	int iTokenStart = 0;
	const int iMaxToken = m_tSpan.GetNumTokens() - 1;
	// drop front tokens until the window fits into both word and CP limits
	while ( iTokenStart < iMaxToken
		&& ( m_tSpan.m_iCodes > iCpLimit || m_tSpan.m_iWords > GetSpanWordsLimit () ) )
	{
		const SpanToken_t & tTok = m_tSpan.GetToken ( iTokenStart );
		if ( tTok.m_uQwordMask )
		{
			m_tSpan.m_iQwords--; // FIXME? might not be true if we remove a duped keyword
			m_bQwordsChanged = true;
		}
		// keep running word/codepoint counters in sync with the dropped token
		m_tSpan.m_iWords -= tTok.m_iWordFlag;
		m_tSpan.m_iCodes -= tTok.m_iLengthCP;
		iTokenStart++;
	}
	// span now begins at the first surviving token
	m_tSpan.m_iStart += iTokenStart;
	// remove extra tokens
	if ( iTokenStart>=m_tSpan.GetNumTokens() )
		m_tSpan.Reset();
	else if ( iTokenStart>0 )
		m_tSpan.RemoveStartingTokens ( iTokenStart );
}
void PassageExtractor_c::CalcPassageWeight ( int iMaxWords )
{
	// recompute all ranking-related fields of m_tPass from the tokens of m_tSpan:
	// keyword mask, match boundaries, longest run of consecutive query words (LCS),
	// minimal keyword-to-edge gap, around-counters, and summed per-keyword weights
	DWORD uLast = 0;
	int iLCS = 1;
	m_tPass.m_iMaxLCS = 1;
	m_tPass.m_uQwords = 0;
	m_tPass.m_iMinGap = iMaxWords-1;
	m_tPass.m_iStartLimit = INT_MAX;
	m_tPass.m_iEndLimit = INT_MIN;
	m_tPass.m_iAroundBefore = m_tPass.m_iAroundAfter = 0;
	m_tPass.m_iQwordCount = 0;
	m_tPass.m_iUniqQwords = 0;
	m_tPass.m_iQwordsWeight = 0;
	int iWord = -1;
	for ( int i = 0; i < m_tSpan.GetNumTokens(); i++ )
	{
		const SpanToken_t & tTok = m_tSpan.GetToken(i);
		if ( !tTok.m_iWordFlag ) // only actual words contribute to the stats
			continue;
		iWord++;
		// update mask
		m_tPass.m_uQwords |= tTok.m_uQwordMask;
		// update match boundary
		if ( tTok.m_uQwordMask )
		{
			int iTok = m_tSpan.m_iStart+i;
			m_tPass.m_iStartLimit = Min ( m_tPass.m_iStartLimit, iTok );
			m_tPass.m_iEndLimit = Max ( m_tPass.m_iEndLimit, iTok );
			m_tPass.m_iQwordCount++;
		}
		// update LCS; shifting the previous mask left by one detects keywords
		// that appear at consecutive query positions
		uLast = tTok.m_uQwordMask & ( uLast<<1 );
		if ( uLast )
		{
			iLCS++;
			m_tPass.m_iMaxLCS = Max ( iLCS, m_tPass.m_iMaxLCS );
		} else
		{
			iLCS = 1;
			uLast = tTok.m_uQwordMask;
		}
		// update min gap (distance from a keyword to the nearest span edge)
		if ( tTok.m_uQwordMask )
		{
			m_tPass.m_iMinGap = Min ( m_tPass.m_iMinGap, iWord );
			m_tPass.m_iMinGap = Min ( m_tPass.m_iMinGap, m_tSpan.m_iWords-1-iWord );
		}
		// words before the first keyword / trailing words after the last keyword
		m_tPass.m_iAroundBefore += ( m_tPass.m_uQwords==0 );
		m_tPass.m_iAroundAfter = (tTok.m_uQwordMask ? 0 : m_tPass.m_iAroundAfter+1 );
	}
	assert ( m_tPass.m_iMinGap>=0 );
	assert ( m_tSpan.m_iWords==iWord+1 );
	// we do it only once because we don't need duplicate weights
	// but m_iQwordCount will still show the total amount of qwords w/dupes
	DWORD uWords = m_tPass.m_uQwords;
	for ( iWord=0; uWords; uWords >>= 1, iWord++ )
		if ( uWords & 1 )
	{
		m_tPass.m_iQwordsWeight += m_tContainer.GetTermWeight(iWord);
		m_tPass.m_iUniqQwords++;
	}
	// total number of words is important too, so lets boost it a bit
	m_tPass.m_iQwordCount *= 2;
}
void PassageExtractor_c::AppendBeforeAfterTokens ( Passage_t & tPassage, const TokenSpan_c & tSpan )
{
	// record the per-token lengths of the context surrounding the keyword window,
	// so the passage can later be trimmed to the limits without re-tokenizing
	// maybe we don't need no extra token info
	if ( ( ( m_tLimits.m_iLimit==0 || m_tLimits.m_iLimit>=m_iDocLen ) && !m_tLimits.m_iLimitWords && m_tQuery.m_ePassageSPZ==SPH_SPZ_NONE ) || m_tQuery.m_bUseBoundaries )
		return;
	// start from the whole passage, then subtract the context tokens below
	tPassage.m_iCodesBetweenKeywords = tPassage.m_iCodes;
	tPassage.m_iWordsBetweenKeywords = tPassage.m_iWords;
	tPassage.m_dBeforeTokens.Resize(0);
	tPassage.m_dAfterTokens.Resize(0);
	tPassage.m_dBeforeTokens.Reserve ( (m_tQuery.m_iAround+1)*2 );
	tPassage.m_dAfterTokens.Reserve ( (m_tQuery.m_iAround+1)*2 );
	// walk backwards from the first keyword, collecting the "before" context
	int iBefore = 0;
	for ( int i = tPassage.m_iStartLimit-tSpan.m_iStart-1; i>=tPassage.m_iStart-m_tSpan.m_iStart; i-- )
	{
		const SpanToken_t & tTok = tSpan.GetToken(i);
		tPassage.m_iCodesBetweenKeywords -= tTok.m_iLengthCP;
		tPassage.m_iWordsBetweenKeywords -= tTok.m_iWordFlag;
		if ( iBefore+tTok.m_iWordFlag<=tPassage.m_iAroundBefore )
		{
			StoredExcerptToken_t & tBeforeToken = tPassage.m_dBeforeTokens.Add();
			tBeforeToken.m_iWordFlag = tTok.m_iWordFlag;
			tBeforeToken.m_iLengthCP = tTok.m_iLengthCP;
			iBefore += tTok.m_iWordFlag;
		} else
			break;
	}
	// walk forwards from the last keyword, collecting the "after" context
	int iAfter = 0;
	for ( int i = tPassage.m_iEndLimit-tSpan.m_iStart+1; i < tPassage.m_iTokens; i++ )
	{
		const SpanToken_t & tTok = tSpan.GetToken(i);
		BYTE iWordFlag = tSpan.GetToken(i).m_iWordFlag;
		tPassage.m_iCodesBetweenKeywords -= tTok.m_iLengthCP;
		tPassage.m_iWordsBetweenKeywords -= iWordFlag;
		if ( iAfter+iWordFlag<=tPassage.m_iAroundAfter )
		{
			StoredExcerptToken_t & tAfterToken = tPassage.m_dAfterTokens.Add();
			tAfterToken.m_iWordFlag = tTok.m_iWordFlag;
			tAfterToken.m_iLengthCP = tTok.m_iLengthCP;
			iAfter += iWordFlag;
		} else
			break;
	}
	assert ( tPassage.m_iWordsBetweenKeywords>0 && tPassage.m_iCodesBetweenKeywords>0 );
}
int PassageExtractor_c::GetSpanWordsLimit() const
{
	// an explicit word limit always wins
	if ( m_tLimits.m_iLimitWords )
		return m_tLimits.m_iLimitWords;

	// otherwise, derive a budget from the "around" setting plus the query words in the span
	return 2*m_tQuery.m_iAround + m_tSpan.m_iQwords;
}
//////////////////////////////////////////////////////////////////////////
/// shared state for functors that emit before/after highlight markers
class BeforeAfterTraits_c
{
protected:
	int m_iBeforeLen;		// length of the "before match" marker
	int m_iAfterLen;		// length of the "after match" marker
	int m_iBeforePostLen;	// length of the "before" marker tail used with %PASSAGE_ID%
	int m_iAfterPostLen;	// length of the "after" marker tail used with %PASSAGE_ID%
	int m_iPassageId;		// current passage id, incremented as markers get emitted
	BeforeAfterTraits_c ( const SnippetQuerySettings_t & tQuery )
		: m_iBeforeLen ( tQuery.m_sBeforeMatch.Length() )
		, m_iAfterLen ( tQuery.m_sAfterMatch.Length() )
		, m_iBeforePostLen ( tQuery.m_sBeforeMatchPassage.Length() )
		, m_iAfterPostLen ( tQuery.m_sAfterMatchPassage.Length() )
		, m_iPassageId ( tQuery.m_iPassageId )
	{}
};
/// functor that highlights selected passages
class PassageHighlighter_c : public TokenFunctorTraits_c, public BeforeAfterTraits_c, public HitTraits_c
{
public:
	PassageHighlighter_c ( CSphVector<Passage_t*> & dPassages, TokenizerRefPtr_c pTokenizer, const SnippetQuerySettings_t & tQuery, const CSphIndexSettings & tIndexSettings, const char * szDoc, int iDocLen,
		const CSphVector<SphHitMark_t> & dHits, const FunctorZoneInfo_t & tZoneInfo, int iField, SnippetResult_t & tRes );

protected:
	bool OnToken ( const TokenInfo_t & tTok, const CSphVector<SphWordID_t> &, const CSphVector<int> * ) final;
	bool OnOverlap ( int iStart, int iLen, int iBoundary ) final;
	void OnSkipHtml ( int iStart, int iLen ) final;
	void OnSPZ ( BYTE, DWORD, const char *, int ) final {}
	void OnTail ( int iStart, int iLen, int iBoundary ) final;
	void OnFinish() final;

private:
	CSphVector<Passage_t*> & m_dPassages;			// passages selected for highlighting, in document order
	CSphVector<PassageResult_t> & m_dPassageText;	// per-passage output text (aliases tRes.m_dFields[iField].m_dPassages)
	CSphVector<BYTE> * m_pCurPassageText = nullptr;	// output buffer of the passage currently being emitted
	int m_iCurToken = 0;							// running token counter over the whole document
	int m_iCurPassage = -1;							// index of the current passage, -1 when between passages
	int m_iOpenUntilTokenPos = 0;					// hit position until which the highlight span stays open
	int m_iOpenUntilTokenNum = 0;					// token number until which the highlight span stays open
	int m_iLastPos = 0;								// position of the last processed token
	TokenSpan_c m_tTmpSpan;
	CSphVector<Space_t> m_dSpaces;					// scratch buffer for splitting whitespace runs
	const FunctorZoneInfo_t & m_tZoneInfo;

	void EmitZoneName ( int iStart ) const;
	void EmitSpaces ( int iStart, int iLen, int iBoundary );
	void UpdatePassage ( int iStart );
	void CheckClose ( int iPos );
};
PassageHighlighter_c::PassageHighlighter_c ( CSphVector<Passage_t*> & dPassages, TokenizerRefPtr_c pTokenizer, const SnippetQuerySettings_t & tQuery, const CSphIndexSettings & tIndexSettings, const char * szDoc, int iDocLen,
	const CSphVector<SphHitMark_t> & dHits, const FunctorZoneInfo_t & tZoneInfo, int iField, SnippetResult_t & tRes )
	: TokenFunctorTraits_c ( std::move (pTokenizer), tQuery, tIndexSettings, szDoc, iDocLen, iField, tRes )
	, BeforeAfterTraits_c(tQuery)
	, HitTraits_c(dHits)
	, m_dPassages ( dPassages )
	, m_dPassageText ( tRes.m_dFields[iField].m_dPassages )
	, m_tZoneInfo ( tZoneInfo )
{
	// one output slot per selected passage
	m_dPassageText.Resize ( dPassages.GetLength() );

	// we don't want these separators in other functors
	ARRAY_FOREACH ( i, m_dPassageText )
	{
		m_dPassageText[i].m_bStartSeparator = true;
		m_dPassageText[i].m_bEndSeparator = true;
	}
}
bool PassageHighlighter_c::OnToken ( const TokenInfo_t & tTok, const CSphVector<SphWordID_t> &, const CSphVector<int> * )
{
	// emit the token into the current passage's buffer, opening a highlight
	// span ("before" marker) when the token starts a query hit
	assert ( m_pDoc );
	assert ( tTok.m_iStart>=0 && m_pDoc+tTok.m_iStart+tTok.m_iLen<=m_pDocMax );
	CheckClose ( tTok.m_uPosition );
	UpdatePassage ( tTok.m_iStart );
	if ( m_iCurPassage!=-1 )
	{
		assert ( m_pCurPassageText );
		RewindHits ( tTok.m_uPosition, m_iField );
		// NOTE(review): span-1 is passed here while QueryHighlighter_c passes the
		// full span to IsTokenHit -- presumably intentional, confirm before changing
		bool bHit = m_pHit<m_pHitEnd && IsTokenHit ( tTok, m_pHit->m_uPosition, m_pHit->m_uSpan - 1, m_iField );
		if ( bHit && !m_iOpenUntilTokenPos )
		{
			// open the highlight span and remember where it must be closed
			ResultEmit ( *m_pCurPassageText, m_tQuery.m_sBeforeMatch.cstr(), m_iBeforeLen, m_tQuery.m_bHasBeforePassageMacro, m_iPassageId, m_tQuery.m_sBeforeMatchPassage.cstr(), m_iBeforePostLen );
			m_iOpenUntilTokenPos = HITMAN::GetPos(m_pHit->m_uPosition)+m_pHit->m_uSpan;
			m_iOpenUntilTokenNum = m_dPassages[m_iCurPassage]->m_iStart+m_dPassages[m_iCurPassage]->m_iTokens;
		}
		// emit token itself
		ResultEmit ( *m_pCurPassageText, m_pDoc+tTok.m_iStart, tTok.m_iLen );
	}
	m_iLastPos = tTok.m_uPosition;
	m_iCurToken++;
	return true;
}
bool PassageHighlighter_c::OnOverlap ( int iStart, int iLen, int iBoundary )
{
	// overlapping (non-token) text is treated exactly like a run of spaces
	EmitSpaces ( iStart, iLen, iBoundary );
	return true;
}
void PassageHighlighter_c::OnSkipHtml ( int iStart, int iLen )
{
	// stripped HTML markup is copied through verbatim, never highlighted
	assert ( m_pDoc );
	assert ( iStart>=0 && m_pDoc+iStart+iLen<=m_pDocMax );
	assert ( m_pCurPassageText );
	ResultEmit ( *m_pCurPassageText, m_pDoc+iStart, iLen );
}
void PassageHighlighter_c::OnTail ( int iStart, int iLen, int iBoundary )
{
	// document tail: close any open highlight span, then emit trailing text as spaces
	CheckClose ( m_iLastPos+1 );
	EmitSpaces ( iStart, iLen, iBoundary );
}
void PassageHighlighter_c::OnFinish()
{
	// close a highlight span left open at end of document
	if ( m_iOpenUntilTokenPos )
	{
		assert ( m_pCurPassageText );
		ResultEmit ( *m_pCurPassageText, m_tQuery.m_sAfterMatch.cstr(), m_iAfterLen, m_tQuery.m_bHasAfterPassageMacro, m_iPassageId++, m_tQuery.m_sAfterMatchPassage.cstr(), m_iAfterPostLen );
	}
	// the last emitted passage reaches the end of the document, no trailing separator
	if ( m_iCurPassage!=-1 )
		m_dPassageText[m_iCurPassage].m_bEndSeparator = false;
	// copy final weights into the per-passage results
	ARRAY_FOREACH ( i, m_dPassages )
		m_dPassageText[i].m_iWeight = m_dPassages[i]->GetWeight();
}
void PassageHighlighter_c::EmitZoneName ( int iStart ) const
{
	// zone emission is optional and requires collected zone positions
	if ( !m_tQuery.m_bEmitZones || !m_tZoneInfo.m_dZonePos.GetLength() )
		return;

	// locate the zone covering this document offset
	int iZone = FindSpan ( m_tZoneInfo.m_dZonePos, iStart );
	if ( iZone==-1 )
		return;

	// find the zone's name by its parent id and emit it as an opening tag
	int iParent = m_tZoneInfo.m_dZoneParent[iZone];
	for ( const auto & tZone : m_tZoneInfo.m_hZones )
		if ( tZone.second==iParent )
		{
			assert ( m_pCurPassageText );
			ResultEmit ( *m_pCurPassageText, "<", 1 );
			ResultEmit ( *m_pCurPassageText, tZone.first.cstr(), tZone.first.Length() );
			ResultEmit ( *m_pCurPassageText, ">", 1 );
			return;
		}
}
void PassageHighlighter_c::EmitSpaces ( int iStart, int iLen, int iBoundary )
{
assert ( m_pDoc );
assert ( iStart>=0 && m_pDoc+iStart+iLen<=m_pDocMax );
SplitSpaceIntoTokens ( m_dSpaces, m_pDoc, iStart, iLen, iBoundary );
ARRAY_FOREACH ( i, m_dSpaces )
{
CheckClose ( m_iLastPos+1 );
UpdatePassage ( iStart );
if ( m_iCurPassage!=-1 )
{
assert ( m_pCurPassageText );
ResultEmit ( *m_pCurPassageText, m_pDoc+m_dSpaces[i].m_iStartBytes, m_dSpaces[i].m_iLengthBytes );
}
m_iCurToken++;
}
}
void PassageHighlighter_c::UpdatePassage ( int iStart )
{
	// advance m_iCurPassage to the passage covering the current token (if any),
	// and switch the output buffer when the passage changes
	const Passage_t * pPassage = ( m_iCurPassage==-1 ) ? nullptr : *( m_dPassages.Begin ()+m_iCurPassage );
	int iPassage = m_iCurPassage;
	if ( m_iCurPassage==-1 || m_iCurToken<pPassage->m_iStart || m_iCurToken>( pPassage->m_iStart + pPassage->m_iTokens - 1 ) )
	{
		// passages are in document order, so resume scanning from the next one
		int iNextPassage = 0;
		if ( m_iCurPassage!=-1 && m_iCurToken>( pPassage->m_iStart + pPassage->m_iTokens - 1 ) )
			iNextPassage = m_iCurPassage+1;
		m_iCurPassage = -1;
		for ( int i=iNextPassage; i<m_dPassages.GetLength(); i++ )
			if ( m_iCurToken>=m_dPassages[i]->m_iStart && m_iCurToken<=( m_dPassages[i]->m_iStart + m_dPassages[i]->m_iTokens - 1 ) )
			{
				m_iCurPassage = i;
				break;
			}
	}
	// note: this means "passage 0 starts at the very first token" -- the snippet
	// then needs no leading separator
	if ( !m_iCurPassage && !m_iCurToken )
		m_dPassageText[m_iCurPassage].m_bStartSeparator = false;
	// entered a new passage: retarget the output buffer and emit its zone name
	if ( m_iCurPassage!=-1 && iPassage!=m_iCurPassage )
	{
		m_pCurPassageText = &(m_dPassageText[m_iCurPassage].m_dText);
		EmitZoneName ( iStart );
	}
}
void PassageHighlighter_c::CheckClose ( int iPos )
{
	// marker folding, emit "after" marker at span end only
	// the span closes when either the hit position limit or the passage token
	// limit is reached, whichever comes first
	if ( ( !m_iOpenUntilTokenPos || iPos<m_iOpenUntilTokenPos ) && ( !m_iOpenUntilTokenNum || m_iCurToken<m_iOpenUntilTokenNum ) )
		return;
	assert ( m_pCurPassageText );
	ResultEmit ( *m_pCurPassageText, m_tQuery.m_sAfterMatch.cstr(), m_iAfterLen, m_tQuery.m_bHasAfterPassageMacro, m_iPassageId++, m_tQuery.m_sAfterMatchPassage.cstr(), m_iAfterPostLen );
	m_iOpenUntilTokenPos = m_iOpenUntilTokenNum = 0;
}
//////////////////////////////////////////////////////////////////////////
/// functor that matches tokens against hit positions from mini-index and highlights them
class QueryHighlighter_c : public TokenFunctorTraits_c, public BeforeAfterTraits_c, public HitTraits_c
{
public:
	QueryHighlighter_c ( TokenizerRefPtr_c pTokenizer, const SnippetQuerySettings_t & tQuery, const CSphIndexSettings & tIndexSettings,
		const char * szDoc, int iDocLen, const CSphVector<SphHitMark_t> & dHits, int iField, SnippetResult_t & tRes );

protected:
	bool OnToken ( const TokenInfo_t & tTok, const CSphVector<SphWordID_t> &, const CSphVector<int> * ) final;
	bool OnOverlap ( int iStart, int iLen, int ) final;
	void OnSkipHtml ( int iStart, int iLen ) final;
	void OnSPZ ( BYTE, DWORD, const char *, int ) final {}
	void OnTail ( int iStart, int iLen, int ) final;
	void OnFinish() final;

private:
	int m_iOpenUntilTokenPos = 0; // blend-chars has same positions as blend-part tokens
	int m_iLastPos = 0;           // position of the last emitted token
	int m_iMatches = 0;           // number of highlight spans opened so far

	void CheckClose ( int iPos );
};
QueryHighlighter_c::QueryHighlighter_c ( TokenizerRefPtr_c pTokenizer, const SnippetQuerySettings_t & tQuery, const CSphIndexSettings & tIndexSettings,
	const char * szDoc, int iDocLen, const CSphVector<SphHitMark_t> & dHits, int iField, SnippetResult_t & tRes )
	: TokenFunctorTraits_c ( std::move (pTokenizer), tQuery, tIndexSettings, szDoc, iDocLen, iField, tRes )
	, BeforeAfterTraits_c(tQuery)
	, HitTraits_c(dHits)
{
	// pre-size the output buffer to avoid early reallocations
	m_dResult.Reserve ( 1024 );
}
bool QueryHighlighter_c::OnToken ( const TokenInfo_t & tTok, const CSphVector<SphWordID_t> &, const CSphVector<int> * )
{
	// emit the token, wrapping it (and the rest of its hit span) into
	// before/after markers when it matches a collected query hit
	assert ( m_pDoc );
	assert ( tTok.m_iStart>=0 && m_pDoc+tTok.m_iStart+tTok.m_iLen<=m_pDocMax );
	RewindHits ( tTok.m_uPosition, m_iField );
	CheckClose ( tTok.m_uPosition );
	// marker folding, emit "before" marker at span start only
	// tmg note: stopwords with step 0 resets m_iOpenUntilTokenPos and breaks highligh of spans of tokens
	if ( m_pHit<m_pHitEnd && IsTokenHit ( tTok, m_pHit->m_uPosition, m_pHit->m_uSpan, m_iField ) && !m_iOpenUntilTokenPos )
	{
		ResultEmit ( m_dResult, m_tQuery.m_sBeforeMatch.cstr(), m_iBeforeLen, m_tQuery.m_bHasBeforePassageMacro, m_iPassageId, m_tQuery.m_sBeforeMatchPassage.cstr(), m_iBeforePostLen );
		m_iMatches++;
		m_iOpenUntilTokenPos = HITMAN::GetPos(m_pHit->m_uPosition)+m_pHit->m_uSpan;
	}
	// emit token itself
	ResultEmit ( m_dResult, m_pDoc+tTok.m_iStart, tTok.m_iLen );
	// multi-position tokens advance the last position by their extra length
	m_iLastPos = tTok.m_uPosition + Max ( tTok.m_iMultiPosLen-1, 0 );
	return true;
}
void QueryHighlighter_c::OnTail ( int iStart, int iLen, int )
{
	// trailing document text: close any open span, then copy through verbatim
	assert ( m_pDoc );
	assert ( iStart>=0 && m_pDoc+iStart+iLen<=m_pDocMax );
	CheckClose ( m_iLastPos+1 );
	ResultEmit ( m_dResult, m_pDoc+iStart, iLen );
}
void QueryHighlighter_c::OnFinish()
{
	// no matches, and an empty result is allowed? wipe the output entirely
	if ( !m_iMatches && m_tQuery.m_bAllowEmpty )
	{
		m_dResult.Reset();
		return;
	}

	// close a highlight span left open at end of document
	if ( m_iOpenUntilTokenPos )
		ResultEmit ( m_dResult, m_tQuery.m_sAfterMatch.cstr(), m_iAfterLen, m_tQuery.m_bHasAfterPassageMacro, m_iPassageId++, m_tQuery.m_sAfterMatchPassage.cstr(), m_iAfterPostLen );
}
bool QueryHighlighter_c::OnOverlap ( int iStart, int iLen, int )
{
	// non-token text between tokens: close any open span, then copy through
	assert ( m_pDoc );
	assert ( iStart>=0 && m_pDoc+iStart+iLen<=m_pDocMax );
	CheckClose ( m_iLastPos+1 );
	ResultEmit ( m_dResult, m_pDoc+iStart, iLen );
	return true;
}
void QueryHighlighter_c::OnSkipHtml ( int iStart, int iLen )
{
	// stripped HTML markup: close any open span, then copy through verbatim
	assert ( m_pDoc );
	assert ( iStart>=0 && m_pDoc+iStart+iLen<=m_pDocMax );
	CheckClose ( m_iLastPos+1 );
	ResultEmit ( m_dResult, m_pDoc+iStart, iLen );
}
void QueryHighlighter_c::CheckClose ( int iPos )
{
	// marker folding: the "after" marker is emitted only once, at span end
	bool bSpanOpen = ( m_iOpenUntilTokenPos!=0 );
	if ( !bSpanOpen || iPos<m_iOpenUntilTokenPos )
		return;

	ResultEmit ( m_dResult, m_tQuery.m_sAfterMatch.cstr(), m_iAfterLen, m_tQuery.m_bHasAfterPassageMacro, m_iPassageId++, m_tQuery.m_sAfterMatchPassage.cstr(), m_iAfterPostLen );
	m_iOpenUntilTokenPos = 0;
}
//////////////////////////////////////////////////////////////////////////
/// functor that processes tokens and collects matching keyword hits into mini-index
class HitCollector_c : public TokenFunctorTraits_c, public virtual HitCollector_i
{
public:
	HitCollector_c ( SnippetsDocIndex_c & tContainer, TokenizerRefPtr_c pTokenizer, DictRefPtr_c pDict, const SnippetQuerySettings_t & tQuery, const CSphIndexSettings & tIndexSettings,
		const char * szDoc, int iDocLen, int iField, CacheStreamer_i & tTokenContainer, CSphVector<ZonePacked_t> & dZones, FunctorZoneInfo_t & tZoneInfo, SnippetResult_t & tRes );

protected:
	bool OnToken ( const TokenInfo_t & tTok, const CSphVector<SphWordID_t> & dTokens, const CSphVector<int> * pMultiPosDelta ) final;
	bool OnOverlap ( int iStart, int iLen, int iBoundary ) final;
	void OnSPZ ( BYTE iSPZ, DWORD uPosition, const char * sZoneName, int iZone ) final;
	void OnSkipHtml ( int iStart, int iLen ) final;
	void OnTail ( int iStart, int iLen, int iBoundary ) final;
	void OnFinish () final {}

	// accessors for the second (replay) pass over the cached token stream
	DictRefPtr_c & GetDict() final { return m_pDict; }
	TokenizerRefPtr_c & GetTokenizer() final { return m_pTokenizer; }
	const CSphIndexSettings & GetIndexSettings() final { return m_tIndexSettings; }
	const SnippetQuerySettings_t & GetSnippetQuery() final { return m_tQuery; }
	CSphVector<ZonePacked_t> & GetZones() final { return m_dZones; }
	FunctorZoneInfo_t & GetZoneInfo() final { return m_tZoneInfo; }
	bool NeedExtraZoneInfo() const final { return m_bCollectExtraZoneInfo; }
	DWORD GetFoundWords() const final { return m_uFoundWords; }

private:
	SnippetsDocIndex_c & m_tContainer;		// mini-index accumulating keyword hits
	CacheStreamer_i & m_tTokenContainer;	// token stream cache for later replay
	CSphVector<ZonePacked_t> & m_dZones;
	FunctorZoneInfo_t & m_tZoneInfo;
	bool m_bCollectExtraZoneInfo = false;

	DictRefPtr_c m_pDict;
	mutable BYTE m_sTmpWord [ 3*SPH_MAX_WORD_LEN + 16 ];	// scratch buffer for magic-word id lookups
	SphWordID_t m_uSentenceID;		// dict id of the sentence-boundary magic word
	SphWordID_t m_uParagraphID;	// dict id of the paragraph-boundary magic word
	DWORD m_uFoundWords = 0;		// bitmask of query terms seen in the document
};
HitCollector_c::HitCollector_c ( SnippetsDocIndex_c & tContainer, TokenizerRefPtr_c pTokenizer, DictRefPtr_c pDict, const SnippetQuerySettings_t & tQuery, const CSphIndexSettings & tIndexSettings,
	const char * szDoc, int iDocLen, int iField, CacheStreamer_i & tTokenContainer, CSphVector<ZonePacked_t> & dZones, FunctorZoneInfo_t & tZoneInfo, SnippetResult_t & tRes )
	: TokenFunctorTraits_c ( std::move (pTokenizer), tQuery, tIndexSettings, szDoc, iDocLen, iField, tRes )
	, m_tContainer ( tContainer )
	, m_tTokenContainer ( tTokenContainer )
	, m_dZones ( dZones )
	, m_tZoneInfo ( tZoneInfo )
	, m_pDict ( std::move (pDict) )
{
	assert ( m_pDict );
	// precompute dictionary ids for the sentence/paragraph boundary magic words
	strncpy ( (char *)m_sTmpWord, MAGIC_WORD_SENTENCE, sizeof(m_sTmpWord)-1 );
	m_uSentenceID = m_pDict->GetWordID ( m_sTmpWord );
	strncpy ( (char *)m_sTmpWord, MAGIC_WORD_PARAGRAPH, sizeof(m_sTmpWord)-1 );
	m_uParagraphID = m_pDict->GetWordID ( m_sTmpWord );
	m_tContainer.SetupHits();
	m_bCollectExtraZoneInfo = true;
}
bool HitCollector_c::OnToken ( const TokenInfo_t & tTok, const CSphVector<SphWordID_t> & dTokens, const CSphVector<int> * pMultiPosDelta )
{
	// collect hits for the token (and all its destination word-forms) into the
	// mini-index, then store the token into the replay stream
	bool bReal = false;
	assert ( tTok.m_iMultiPosLen==0 || ( pMultiPosDelta && pMultiPosDelta->GetLength()==dTokens.GetLength()+1 ) );
	bool bMultiform = ( tTok.m_iMultiPosLen!=0 );
	int iPos = tTok.m_uPosition;
	// different paths for leading token and tokens position generation
	// for blended and multi word-form with multiple destination word-forms
	if ( !bMultiform && tTok.m_uWordId )
	{
		m_tContainer.AddHits ( tTok.m_uWordId, tTok.m_sWord, tTok.m_iLen, HITMAN::Create ( m_iField, iPos ) );
		bReal = true;
	}
	// destination word-forms; for multiforms each advances the position by its delta
	ARRAY_FOREACH ( i, dTokens )
	{
		if ( dTokens[i] )
		{
			if ( bMultiform )
				iPos += ( *pMultiPosDelta )[i];
			m_tContainer.AddHits ( dTokens[i], tTok.m_sWord, tTok.m_iLen, HITMAN::Create ( m_iField, iPos ) );
			bReal = true;
		}
	}
	// the multiform leading token goes last, at the final position
	// (the redundant nested bMultiform re-check was removed; the outer condition
	// already guarantees it)
	if ( bMultiform && tTok.m_uWordId )
	{
		iPos += pMultiPosDelta->Last();
		m_tContainer.AddHits ( tTok.m_uWordId, tTok.m_sWord, tTok.m_iLen, HITMAN::Create ( m_iField, iPos ) );
		bReal = true;
	}
	m_tContainer.SetLastPos ( bReal ? iPos : m_tContainer.GetLastPos() );
	// remember which query term (if any) this token matched
	int iTermIndex = m_tContainer.FindWord ( tTok.m_uWordId, tTok.m_sWord, tTok.m_iLen );
	ARRAY_FOREACH_COND ( i, dTokens, iTermIndex==-1 )
		iTermIndex = m_tContainer.FindWord ( dTokens[i], nullptr, 0 );
	m_uFoundWords |= iTermIndex==-1 ? 0 : 1 << iTermIndex;
	// cache the token so later passes can replay the stream without re-tokenizing
	m_tTokenContainer.StoreToken ( tTok, iTermIndex );
	return true;
}
void HitCollector_c::OnSPZ ( BYTE iSPZ, DWORD uPosition, const char * sZoneName, int iZone )
{
	// record a sentence/paragraph/zone boundary as a hit on its magic word,
	// then store it into the replay stream
	switch ( iSPZ )
	{
	case MAGIC_CODE_SENTENCE:
		m_tContainer.AddHits ( m_uSentenceID, NULL, 0, HITMAN::Create ( m_iField, uPosition ) );
		break;
	case MAGIC_CODE_PARAGRAPH:
		m_tContainer.AddHits ( m_uParagraphID, NULL, 0, HITMAN::Create ( m_iField, uPosition ) );
		break;
	case MAGIC_CODE_ZONE:
		// a zone boundary must have been registered (with this position) already
		assert ( m_dZones.GetLength() );
		assert ( ( ( m_dZones.Last()>>32 ) & UINT32_MASK )==uPosition );
		assert ( sZoneName );
		m_tContainer.AddHits ( m_pDict->GetWordID ( (BYTE *)const_cast<char*>(sZoneName) ), NULL, 0, HITMAN::Create ( m_iField, uPosition ) );
		break;
	default: assert ( 0 && "impossible SPZ" );
	}
	m_tContainer.SetLastPos(uPosition);
	m_tTokenContainer.StoreSPZ ( iSPZ, uPosition, sZoneName, iZone );
}
bool HitCollector_c::OnOverlap ( int iStart, int iLen, int iBoundary )
{
	// no hits to collect here; just record the event for later replay
	m_tTokenContainer.StoreOverlap ( iStart, iLen, iBoundary );
	return true;
}
void HitCollector_c::OnSkipHtml ( int iStart, int iLen )
{
	// no hits to collect here; just record the event for later replay
	m_tTokenContainer.StoreSkipHtml ( iStart, iLen );
}
void HitCollector_c::OnTail ( int iStart, int iLen, int iBoundary )
{
	// no hits to collect here; just record the event for later replay
	m_tTokenContainer.StoreTail ( iStart, iLen, iBoundary );
}
//////////////////////////////////////////////////////////////////////////
/// factory: functor that emits the start of the document as the snippet
std::unique_ptr<TokenFunctor_i> CreateDocStartHighlighter ( TokenizerRefPtr_c pTokenizer, const SnippetQuerySettings_t & tQuery, const SnippetLimits_t & tLimits, const CSphIndexSettings & tIndexSettings, const char * szDoc,
	int iDocLen, int iField, int & iResultCP, SnippetResult_t & tRes )
{
	return std::make_unique<DocStartHighlighter_c> ( std::move ( pTokenizer ), tQuery, tLimits, tIndexSettings, szDoc, iDocLen, iField, iResultCP, tRes );
}
/// factory: functor that highlights all query matches over the whole document
std::unique_ptr<TokenFunctor_i> CreateQueryHighlighter ( TokenizerRefPtr_c pTokenizer, const SnippetQuerySettings_t & tQuery, const CSphIndexSettings & tIndexSettings, const char * szDoc, int iDocLen,
	const CSphVector<SphHitMark_t> & dHits, int iField, SnippetResult_t & tRes )
{
	return std::make_unique<QueryHighlighter_c> ( std::move ( pTokenizer ), tQuery, tIndexSettings, szDoc, iDocLen, dHits, iField, tRes );
}
/// factory: functor that extracts candidate passages around query matches
std::unique_ptr<TokenFunctor_i> CreatePassageExtractor ( const SnippetsDocIndex_c & tContainer, PassageContext_t & tContext, TokenizerRefPtr_c pTokenizer, const SnippetQuerySettings_t & tQuery, const SnippetLimits_t & tLimits,
	const CSphIndexSettings & tIndexSettings, const char * szDoc, int iDocLen, const CSphVector<SphHitMark_t> & dHits, int iField, SnippetResult_t & tRes )
{
	return std::make_unique<PassageExtractor_c> ( tContainer, tContext, std::move ( pTokenizer ), tQuery, tLimits, tIndexSettings, szDoc, iDocLen, dHits, iField, tRes );
}
/// factory: functor that renders the previously selected passages with highlighting
std::unique_ptr<TokenFunctor_i> CreatePassageHighlighter ( CSphVector<Passage_t*> & dPassages, TokenizerRefPtr_c pTokenizer, const SnippetQuerySettings_t & tQuery, const CSphIndexSettings & tIndexSettings,
	const char * szDoc, int iDocLen, const CSphVector<SphHitMark_t> & dHits, const FunctorZoneInfo_t & tZoneInfo, int iField, SnippetResult_t & tRes )
{
	return std::make_unique<PassageHighlighter_c> ( dPassages, std::move ( pTokenizer ), tQuery, tIndexSettings, szDoc, iDocLen, dHits, tZoneInfo, iField, tRes );
}
/// factory: functor that builds the per-document mini-index of keyword hits
std::unique_ptr<HitCollector_i> CreateHitCollector ( SnippetsDocIndex_c & tContainer, TokenizerRefPtr_c pTokenizer, DictRefPtr_c pDict, const SnippetQuerySettings_t & tQuery, const CSphIndexSettings & tIndexSettings,
	const char * szDoc, int iDocLen, int iField, CacheStreamer_i & tTokenContainer, CSphVector<ZonePacked_t> & dZones, FunctorZoneInfo_t & tZoneInfo, SnippetResult_t & tRes )
{
	return std::make_unique<HitCollector_c> ( tContainer, std::move ( pTokenizer ), std::move (pDict), tQuery, tIndexSettings, szDoc, iDocLen, iField, tTokenContainer, dZones, tZoneInfo, tRes );
}
| 48,428
|
C++
|
.cpp
| 1,235
| 36.758704
| 226
| 0.708045
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,960
|
tokenizer_utf8.cpp
|
manticoresoftware_manticoresearch/src/tokenizer/tokenizer_utf8.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "tokenizerbase2_impl.h"
#include "lowercaser_impl.h"
// default charset_table: digits, latin letters (uppercase folded to lowercase),
// underscore, and Cyrillic letters (U+410..U+44F, Ё/ё) folded to lowercase
const char* SPHINX_DEFAULT_UTF8_TABLE = "0..9, A..Z->a..z, _, a..z, U+410..U+42F->U+430..U+44F, U+430..U+44F, U+401->U+451, U+451";
/// UTF-8 tokenizer
class Tokenizer_UTF8_Base_c: public CSphTokenizerBase2
{
public:
	explicit Tokenizer_UTF8_Base_c ( bool bDefaultCharset );

	// attach a new input buffer and reset all per-buffer state
	void SetBuffer ( const BYTE* sBuffer, int iLength ) final;
	// number of bytes in the UTF-8 sequence starting with the given lead byte
	int GetCodepointLength ( int iCode ) const noexcept final;
	int GetMaxCodepointLength() const noexcept final
	{
		return GetLowercaser().GetMaxCodepointLength();
	}
};
Tokenizer_UTF8_Base_c::Tokenizer_UTF8_Base_c ( bool bDefaultCharset )
{
	// optionally preload the built-in latin+cyrillic case-folding table
	if ( bDefaultCharset )
	{
		CSphString sTmp;
		SetCaseFolding ( SPHINX_DEFAULT_UTF8_TABLE, sTmp );
	}
	m_bHasBlend = false;
}
void Tokenizer_UTF8_Base_c::SetBuffer ( const BYTE* sBuffer, int iLength )
{
	// sanity-check the new length (only the length is validated here)
	assert ( iLength >= 0 );

	// set buffer
	m_pBuffer = sBuffer;
	m_pBufferMax = sBuffer + iLength;
	m_pCur = sBuffer;

	// reset all per-buffer tokenization state
	m_pTokenStart = m_pTokenEnd = nullptr;
	m_pBlendStart = m_pBlendEnd = nullptr;
	m_iOvershortCount = 0;
	m_bBoundary = m_bTokenBoundary = false;
}
int Tokenizer_UTF8_Base_c::GetCodepointLength ( int iCode ) const noexcept
{
	// ASCII lead byte is always a single-byte sequence
	if ( iCode < 128 )
		return 1;

	// for multi-byte sequences, the count of leading 1-bits in the lead byte
	// equals the total sequence length
	int iBytes = 0;
	for ( ; iCode & 0x80; iCode <<= 1 )
		iBytes++;

	assert ( iBytes >= 2 && iBytes <= 4 );
	return iBytes;
}
/// concrete UTF-8 tokenizer; IS_QUERY selects query-time tokenization rules
template<bool IS_QUERY>
class CSphTokenizer_UTF8: public Tokenizer_UTF8_Base_c
{
public:
	explicit CSphTokenizer_UTF8 ( bool bDefaultCharset )
		: Tokenizer_UTF8_Base_c ( bDefaultCharset )
	{}

	BYTE * GetToken() override;
	BYTE * GetTokenEscaped() override;
	TokenizerRefPtr_c Clone ( ESphTokenizerClone eMode ) const noexcept final;
};
template<bool IS_QUERY>
BYTE* CSphTokenizer_UTF8<IS_QUERY>::GetToken()
{
	// reset per-token state flags before fetching the next token
	m_bWasSpecial = false;
	m_bBlended = false;
	m_iOvershortCount = 0;
	m_bTokenBoundary = false;
	m_bWasSynonym = false;
	// dispatch to the blended/non-blended specialization (escaping disabled)
	return m_bHasBlend
		? DoGetToken<IS_QUERY, true, false>()
		: DoGetToken<IS_QUERY, false, false>();
}
template<bool IS_QUERY>
BYTE* CSphTokenizer_UTF8<IS_QUERY>::GetTokenEscaped()
{
	// reset per-token state flags before fetching the next token
	m_bTokenBoundary = false;
	m_bWasSpecial = false;
	m_bWasSynonym = false;
	m_bBlended = false;
	m_iOvershortCount = 0;

	// dispatch to the blended/non-blended specialization (escaping enabled)
	if ( m_bHasBlend )
		return DoGetToken<IS_QUERY, true, true>();
	return DoGetToken<IS_QUERY, false, true>();
}
template<bool IS_QUERY>
TokenizerRefPtr_c CSphTokenizer_UTF8<IS_QUERY>::Clone ( ESphTokenizerClone eMode ) const noexcept
{
	// any non-index clone becomes a query-mode tokenizer (IS_QUERY=true)
	CSphTokenizerBase* pClone;
	if ( eMode != SPH_CLONE_INDEX )
		pClone = new CSphTokenizer_UTF8<true> ( false );
	else
		pClone = new CSphTokenizer_UTF8<false> ( false );
	// copy shared settings (charset tables etc.) from this instance
	pClone->CloneBase ( this, eMode );
	return TokenizerRefPtr_c {pClone};
}
/// UTF-8 tokenizer with n-grams
/////////////////////////////////////////////////////////////////////////////
/// UTF-8 tokenizer that additionally splits configured characters into 1-grams
template<bool IS_QUERY>
class CSphTokenizer_UTF8Ngram: public CSphTokenizer_UTF8<IS_QUERY>
{
public:
	explicit CSphTokenizer_UTF8Ngram ( bool bDefaultCharset )
		: CSphTokenizer_UTF8<IS_QUERY> (bDefaultCharset) {}

	bool SetNgramChars ( const char* sConfig, CSphString& sError ) final;
	void SetNgramLen ( int iLen ) final;
	BYTE* GetToken() final;

protected:
	int m_iNgramLen = 1;	// only length 1 is supported, see GetToken()
};
template<bool IS_QUERY>
bool CSphTokenizer_UTF8Ngram<IS_QUERY>::SetNgramChars ( const char* sConfig, CSphString& sError )
{
	// n-gram characters are remapped as special codepoints, so the base
	// tokenizer emits each of them as a standalone token
	return ISphTokenizer::RemapCharacters ( sConfig, FLAG_CODEPOINT_NGRAM | FLAG_CODEPOINT_SPECIAL, "ngram", true, sError ); // !COMMIT support other n-gram lengths than 1
}
template<bool IS_QUERY>
void CSphTokenizer_UTF8Ngram<IS_QUERY>::SetNgramLen ( int iLen )
{
	// stored but effectively unused; GetToken() asserts the length is 1
	assert ( iLen > 0 );
	m_iNgramLen = iLen;
}
template<bool IS_QUERY>
BYTE* CSphTokenizer_UTF8Ngram<IS_QUERY>::GetToken()
{
	// !COMMIT support other n-gram lengths than 1
	assert ( m_iNgramLen == 1 );
	// with length 1, the special-codepoint remapping already does the splitting
	return CSphTokenizer_UTF8<IS_QUERY>::GetToken();
}
/// factory: plain UTF-8 index-mode tokenizer
TokenizerRefPtr_c Tokenizer::Detail::CreateUTF8Tokenizer ( bool bDefaultCharset )
{
	return TokenizerRefPtr_c { new CSphTokenizer_UTF8<false> ( bDefaultCharset ) };
}
/// factory: UTF-8 index-mode tokenizer with 1-gram splitting support
TokenizerRefPtr_c Tokenizer::Detail::CreateUTF8NgramTokenizer ( bool bDefaultCharset )
{
	return TokenizerRefPtr_c { new CSphTokenizer_UTF8Ngram<false> ( bDefaultCharset ) };
}
| 4,705
|
C++
|
.cpp
| 147
| 30.115646
| 168
| 0.73482
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,961
|
tokenizer_plugin.cpp
|
manticoresoftware_manticoresearch/src/tokenizer/tokenizer_plugin.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "token_filter.h"
#include "schema/schema.h"
#include "sphinxplugin.h"
/// token filter that routes the token stream through a loaded UDF plugin
class PluginFilterTokenizer_c final: public CSphTokenFilter
{
protected:
	PluginTokenFilterRefPtr_c m_pFilter; ///< plugin descriptor
	CSphString m_sOptions; ///< options string for the plugin init()
	void* m_pUserdata = nullptr; ///< userdata returned from by the plugin init()
	bool m_bGotExtra = false; ///< are we looping through extra tokens?
	int m_iPosDelta = 0; ///< position delta for the current token, see comments in GetToken()
	bool m_bWasBlended = false; ///< whether the last raw token was blended

	// let the plugin release its userdata before the filter goes away
	~PluginFilterTokenizer_c() final
	{
		if ( m_pFilter->m_fnDeinit )
			m_pFilter->m_fnDeinit ( m_pUserdata );
	}
public:
	PluginFilterTokenizer_c ( TokenizerRefPtr_c pTok, PluginTokenFilterRefPtr_c pFilter, const char* sOptions )
		: CSphTokenFilter ( std::move (pTok) )
		, m_pFilter ( std::move ( pFilter ) )
		, m_sOptions ( sOptions )
	{
		assert ( m_pFilter );
		// FIXME!!! handle error in constructor \ move to setup?
		// initialize the plugin with an empty schema; the error is discarded here
		CSphString sError;
		SetFilterSchema ( CSphSchema(), sError );
	}
	// clone the underlying tokenizer and wrap it into a fresh plugin filter
	// (the new filter re-runs the plugin init() via its constructor)
	TokenizerRefPtr_c Clone ( ESphTokenizerClone eMode ) const noexcept final
	{
		return TokenizerRefPtr_c { new PluginFilterTokenizer_c ( m_pTokenizer->Clone ( eMode ), m_pFilter, m_sOptions.cstr() ) };
	}
	// (re)initialize the plugin for a new field schema; returns false and sets
	// sError when the plugin's init() reports a failure (non-zero return)
	bool SetFilterSchema ( const CSphSchema& s, CSphString& sError ) final
	{
		// tear down any previous plugin instance first
		if ( m_pUserdata && m_pFilter->m_fnDeinit )
			m_pFilter->m_fnDeinit ( m_pUserdata );

		CSphVector<const char*> dFields;
		for ( int i = 0; i < s.GetFieldsCount(); i++ )
			dFields.Add ( s.GetFieldName ( i ) );

		char sErrBuf[SPH_UDF_ERROR_LEN + 1];
		if ( m_pFilter->m_fnInit ( &m_pUserdata, dFields.GetLength(), dFields.Begin(), m_sOptions.cstr(), sErrBuf ) == 0 )
			return true;
		sError = sErrBuf;
		return false;
	}
	// pass per-document options to the plugin; returns false and sets sError
	// when the plugin's begin_document() reports a failure (non-zero return)
	bool SetFilterOptions ( const char* sOptions, CSphString& sError ) final
	{
		char sErrBuf[SPH_UDF_ERROR_LEN + 1];
		if ( m_pFilter->m_fnBeginDocument ( m_pUserdata, sOptions, sErrBuf ) == 0 )
			return true;
		sError = sErrBuf;
		return false;
	}
	// notify the plugin that a new field starts (optional plugin hook)
	void BeginField ( int iField ) final
	{
		if ( m_pFilter->m_fnBeginField )
			m_pFilter->m_fnBeginField ( m_pUserdata, iField );
	}
/// Pull the next (possibly plugin-transformed) token.
/// Drives a two-state machine: drain pending plugin "extra" tokens first,
/// otherwise push raw tokens into the plugin until it emits something.
BYTE* GetToken() final
{
	// we have two principal states here
	// a) have pending extra tokens, keep looping and returning those
	// b) no extras, keep pushing until plugin returns anything
	//
	// we also have to handle position deltas, and that story is a little tricky
	// positions are not assigned in the tokenizer itself (we might wanna refactor that)
	// however, tokenizer has some (partial) control over the keyword positions, too
	// when it skips some too-short tokens, it returns a non-zero value via GetOvershortCount()
	// when it returns a blended token, it returns true via TokenIsBlended()
	// so while the default position delta is 1, overshorts can increase it by N,
	// and blended flag can decrease it by 1, and that's under tokenizer's control
	//
	// so for the plugins, we simplify (well i hope!) this complexity a little
	// we compute a proper position delta here, pass it, and let the plugin modify it
	// we report all tokens as regular, and return the delta via GetOvershortCount()

	// state (a), just loop the pending extras
	if ( m_bGotExtra )
	{
		m_iPosDelta = 1; // default delta is 1
		BYTE* pTok = (BYTE*)m_pFilter->m_fnGetExtraToken ( m_pUserdata, &m_iPosDelta );
		GetBlended();
		if ( pTok )
			return pTok;
		m_bGotExtra = false; // extras exhausted, fall through to state (b)
	}

	// state (b), push raw tokens, return results
	for ( ;; )
	{
		// get next raw token, handle field end
		BYTE* pRaw = CSphTokenFilter::GetToken();
		if ( !pRaw )
		{
			// no more hits? notify plugin of a field end,
			// and check if there are pending tokens
			m_bGotExtra = 0; // bool assigned an int literal; equivalent to 'false'
			if ( m_pFilter->m_fnEndField )
				if ( !m_pFilter->m_fnEndField ( m_pUserdata ) )
				{
					m_bBlended = false;
					m_bBlendedPart = false;
					return NULL;
				}

			// got them, start fetching
			// NOTE(review): this path also runs when m_fnEndField is null, and
			// m_fnGetExtraToken is called unguarded — presumably that hook is
			// mandatory for plugins; confirm against the plugin API contract
			m_bGotExtra = true;
			BYTE* pTok = (BYTE*)m_pFilter->m_fnGetExtraToken ( m_pUserdata, &m_iPosDelta );
			GetBlended();
			return pTok;
		}

		// compute proper position delta
		m_iPosDelta = ( m_bWasBlended ? 0 : 1 ) + CSphTokenFilter::GetOvershortCount();
		m_bWasBlended = CSphTokenFilter::TokenIsBlended();

		// push raw token to plugin, return a processed one, if any
		int iExtra = 0;
		BYTE* pTok = (BYTE*)m_pFilter->m_fnPushToken ( m_pUserdata, (char*)pRaw, &iExtra, &m_iPosDelta );
		m_bGotExtra = ( iExtra != 0 );
		GetBlended();
		if ( pTok )
			return pTok;
	}
}
/// Report the plugin-adjusted position delta, rebased so the default
/// delta of 1 maps to zero overshorts.
int GetOvershortCount() const noexcept final
{
	const int iDelta = m_iPosDelta;
	return iDelta - 1;
}
private:
/// Refresh the blended-token flags from the plugin's optional
/// introspection hooks (each hook may be absent).
void GetBlended()
{
	if ( m_pFilter->m_fnTokenIsBlended )
		m_bBlended = ( m_pFilter->m_fnTokenIsBlended ( m_pUserdata ) != 0 );
	if ( m_pFilter->m_fnTokenIsBlendedPart )
		m_bBlendedPart = ( m_pFilter->m_fnTokenIsBlendedPart ( m_pUserdata ) != 0 );
}
};
/// Parse a plugin spec ("dll:filtername[:options]"), load the token-filter
/// plugin, and wrap pTokenizer with it. On failure, sError is set and
/// pTokenizer is left unchanged.
void Tokenizer::AddPluginFilterTo ( TokenizerRefPtr_c& pTokenizer, const CSphString& sSpec, CSphString& sError )
{
	StrVec_t dPlugin; // dll, filtername, options
	if ( !sphPluginParseSpec ( sSpec, dPlugin, sError ) )
		return;

	// an empty spec is a silent no-op, not an error
	if ( dPlugin.IsEmpty() )
		return;

	PluginTokenFilterRefPtr_c p = PluginAcquire<PluginTokenFilter_c> ( dPlugin[0].cstr(), PLUGIN_INDEX_TOKEN_FILTER, dPlugin[1].cstr(), sError );
	if ( !p )
	{
		// NOTE(review): sError.cstr() is passed as an argument while sError
		// itself is being rewritten by SetSprintf — verify CSphString builds
		// the new value before releasing the old buffer
		sError.SetSprintf ( "INTERNAL ERROR: plugin %s:%s loaded ok but lookup fails, error: %s", dPlugin[0].cstr(), dPlugin[1].cstr(), sError.cstr() );
		return;
	}
	pTokenizer = new PluginFilterTokenizer_c ( std::move ( pTokenizer ), std::move (p), dPlugin[2].cstr() );
}
| 6,118
|
C++
|
.cpp
| 159
| 35.440252
| 146
| 0.699798
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,962
|
tokenizerbase2.cpp
|
manticoresoftware_manticoresearch/src/tokenizer/tokenizerbase2.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "tokenizerbase2_impl.h"
#include "lowercaser_impl.h"
#include "exceptions_trie.h"
#include "sphinxint.h"
/// Copy up to iLen bytes from pSrc to pDst, stopping early at pSrc's NUL,
/// and always append a terminating NUL to pDst.
/// NOTE(review): the terminator is written *in addition* to the copied
/// bytes, so pDst must hold iLen+1 bytes — confirm all callers account
/// for that (one caller passes sizeof(m_sAccum) as iLen).
static inline void CopySubstring ( BYTE* pDst, const BYTE* pSrc, int iLen )
{
	while ( iLen-- > 0 && *pSrc )
		*pDst++ = *pSrc++;
	*pDst++ = '\0';
}
/// Decide whether a folded codepoint acts as a token separator.
/// iFolded is the lowercaser output: codepoint bits plus behavior flags.
/// bFirst tells whether this would be the first char of a token (dual
/// codepoints separate only in that position).
static FORCE_INLINE bool IsSeparator ( int iFolded, bool bFirst )
{
	// eternal separator
	if ( iFolded < 0 || ( iFolded & MASK_CODEPOINT ) == 0 )
		return true;

	// just a codepoint
	if ( !( iFolded & MASK_FLAGS ) )
		return false;

	// any magic flag, besides dual
	if ( !( iFolded & FLAG_CODEPOINT_DUAL ) )
		return true;

	// FIXME? n-grams currently also set dual
	if ( iFolded & FLAG_CODEPOINT_NGRAM )
		return true;

	// dual depends on position
	return bFirst;
}
/// Skip the rest of the current blended token and count how many
/// non-blended subtokens (of at least min_word_len chars) it contains.
/// Temporarily clamps the buffer end to the blended token end so the
/// scan cannot run past it.
int CSphTokenizerBase2::SkipBlended()
{
	if ( !m_pBlendEnd )
		return 0;

	const BYTE* pMax = m_pBufferMax;
	m_pBufferMax = m_pBlendEnd;

	// loop until the blended token end
	int iBlended = 0; // how many blended subtokens we have seen so far
	int iAccum = 0;	  // how many non-blended chars in a row we have seen so far
	while ( m_pCur < m_pBufferMax )
	{
		int iCode = GetCodepoint();
		if ( iCode == '\\' )
			iCode = GetCodepoint(); // no boundary check, GetCP does it
		iCode = GetLowercaser().ToLower ( iCode ); // no -1 check, ToLower does it
		if ( iCode < 0 )
			iCode = 0;
		if ( iCode & FLAG_CODEPOINT_BLEND )
			iCode = 0;
		if ( iCode & MASK_CODEPOINT )
		{
			iAccum++;
			continue;
		}
		// hit a separator (or blended char): close the current subtoken run
		if ( iAccum >= m_tSettings.m_iMinWordLen )
			iBlended++;
		iAccum = 0;
	}
	// account for a trailing run that ends exactly at the blend end
	if ( iAccum >= m_tSettings.m_iMinWordLen )
		iBlended++;

	// restore the real buffer end
	m_pBufferMax = pMax;
	return iBlended;
}
/// Emit the trim variants (none/all/both/head/tail) of the last blended
/// token, one per call, in that priority order. Returns the variant in
/// m_sAccum, or nullptr once all pending variants are exhausted.
BYTE* CSphTokenizerBase2::GetBlendedVariant()
{
	// we can get called on several occasions
	// case 1, a new blended token was just accumulated
	if ( m_bBlended && !m_bBlendAdd )
	{
		// fast path for the default case (trim_none)
		if ( m_uBlendVariants == BLEND_TRIM_NONE )
			return m_sAccum;

		// analyze the full token, find non-blended bounds
		m_iBlendNormalStart = -1;
		m_iBlendNormalEnd = -1;

		// OPTIMIZE? we can skip this based on non-blended flag from adjust
		// m_iBlendNormalStart/End become the byte offsets of the first and
		// one-past-last non-blended codepoints, or stay -1 if none exist
		const BYTE* p = m_sAccum;
		while ( *p )
		{
			int iLast = (int)( p - m_sAccum );
			int iCode = sphUTF8Decode ( p );
			if ( !( GetLowercaser().ToLower ( iCode ) & FLAG_CODEPOINT_BLEND ) )
			{
				m_iBlendNormalEnd = (int)( p - m_sAccum );
				if ( m_iBlendNormalStart < 0 )
					m_iBlendNormalStart = iLast;
			}
		}

		// build todo mask
		// check and revert a few degenerate cases
		m_uBlendVariantsPending = m_uBlendVariants;
		if ( m_uBlendVariantsPending & BLEND_TRIM_BOTH )
		{
			if ( m_iBlendNormalStart < 0 )
			{
				// no heading blended; revert BOTH to TAIL
				m_uBlendVariantsPending &= ~BLEND_TRIM_BOTH;
				m_uBlendVariantsPending |= BLEND_TRIM_TAIL;
			} else if ( m_iBlendNormalEnd < 0 )
			{
				// no trailing blended; revert BOTH to HEAD
				m_uBlendVariantsPending &= ~BLEND_TRIM_BOTH;
				m_uBlendVariantsPending |= BLEND_TRIM_HEAD;
			}
		}
		if ( m_uBlendVariantsPending & BLEND_TRIM_HEAD )
		{
			// either no heading blended, or pure blended; revert HEAD to NONE
			if ( m_iBlendNormalStart <= 0 )
			{
				m_uBlendVariantsPending &= ~BLEND_TRIM_HEAD;
				m_uBlendVariantsPending |= BLEND_TRIM_NONE;
			}
		}
		if ( m_uBlendVariantsPending & BLEND_TRIM_TAIL )
		{
			// either no trailing blended, or pure blended; revert TAIL to NONE
			if ( m_iBlendNormalEnd <= 0 || m_sAccum[m_iBlendNormalEnd] == 0 )
			{
				m_uBlendVariantsPending &= ~BLEND_TRIM_TAIL;
				m_uBlendVariantsPending |= BLEND_TRIM_NONE;
			}
		}

		// ok, we are going to return a few variants after all, flag that
		// OPTIMIZE? add fast path for "single" variants?
		m_bBlendAdd = true;
		assert ( m_uBlendVariantsPending );

		// we also have to stash the original blended token
		// because accumulator contents may get trashed by caller (say, when stemming)
		strncpy ( (char*)m_sAccumBlend, (char*)m_sAccum, sizeof ( m_sAccumBlend ) - 1 );
	}

	// case 2, caller is checking for pending variants, have we even got any?
	if ( !m_bBlendAdd )
		return nullptr;

	// handle trim_none
	// this MUST be the first handler, so that we could avoid copying below, and just return the original accumulator
	if ( m_uBlendVariantsPending & BLEND_TRIM_NONE )
	{
		m_uBlendVariantsPending &= ~BLEND_TRIM_NONE;
		m_bBlended = true;
		return m_sAccum;
	}

	// handle trim_all: strip every blended codepoint from the stashed token
	if ( m_uBlendVariantsPending & BLEND_TRIM_ALL )
	{
		m_uBlendVariantsPending &= ~BLEND_TRIM_ALL;
		m_bBlended = true;
		const BYTE* pSrc = m_sAccumBlend;
		BYTE* pDst = m_sAccum;
		while ( *pSrc )
		{
			int iCode = sphUTF8Decode ( pSrc );
			if ( !( GetLowercaser().ToLower ( iCode ) & FLAG_CODEPOINT_BLEND ) )
				pDst += sphUTF8Encode ( pDst, ( iCode & MASK_CODEPOINT ) );
		}
		*pDst = '\0';
		return m_sAccum;
	}

	// handle trim_both
	if ( m_uBlendVariantsPending & BLEND_TRIM_BOTH )
	{
		m_uBlendVariantsPending &= ~BLEND_TRIM_BOTH;
		if ( m_iBlendNormalStart < 0 )
			m_uBlendVariantsPending |= BLEND_TRIM_TAIL; // no heading blended; revert BOTH to TAIL
		else if ( m_iBlendNormalEnd < 0 )
			m_uBlendVariantsPending |= BLEND_TRIM_HEAD; // no trailing blended; revert BOTH to HEAD
		else
		{
			assert ( m_iBlendNormalStart < m_iBlendNormalEnd );
			CopySubstring ( m_sAccum, m_sAccumBlend + m_iBlendNormalStart, m_iBlendNormalEnd - m_iBlendNormalStart );
			m_bBlended = true;
			return m_sAccum;
		}
	}

	// handle TRIM_HEAD
	if ( m_uBlendVariantsPending & BLEND_TRIM_HEAD )
	{
		m_uBlendVariantsPending &= ~BLEND_TRIM_HEAD;
		if ( m_iBlendNormalStart >= 0 )
		{
			// FIXME! need we check for overshorts?
			CopySubstring ( m_sAccum, m_sAccumBlend + m_iBlendNormalStart, sizeof ( m_sAccum ) );
			m_bBlended = true;
			return m_sAccum;
		}
	}

	// handle TRIM_TAIL
	if ( m_uBlendVariantsPending & BLEND_TRIM_TAIL )
	{
		m_uBlendVariantsPending &= ~BLEND_TRIM_TAIL;
		if ( m_iBlendNormalEnd > 0 )
		{
			// FIXME! need we check for overshorts?
			CopySubstring ( m_sAccum, m_sAccumBlend, m_iBlendNormalEnd );
			m_bBlended = true;
			return m_sAccum;
		}
	}

	// all clear, no more variants to go
	m_bBlendAdd = false;
	return nullptr;
}
/// Try to match an exception (synonym mapping) starting at pStart.
/// [pStart,pCur) is the current regular-token candidate; on a successful
/// match the mapped text is placed into m_sAccum, token bounds and cursor
/// are advanced past the exception, and true is returned.
bool CSphTokenizerBase2::CheckException ( const BYTE* pStart, const BYTE* pCur, bool bQueryMode )
{
	assert ( m_pExc );
	assert ( pStart );

	// at this point [pStart,pCur) is our regular tokenization candidate,
	// and pCur is pointing at what normally is considered separtor
	//
	// however, it might be either a full exception (if we're really lucky)
	// or (more likely) an exception prefix, so lets check for that
	//
	// interestingly enough, note that our token might contain a full exception
	// as a prefix, for instance [USAF] token vs [USA] exception; but in that case
	// we still need to tokenize regularly, because even exceptions need to honor
	// word boundaries

	// lets begin with a special (hopefully fast) check for the 1st byte
	const BYTE* p = pStart;
	if ( m_pExc->GetFirst ( *p ) < 0 )
		return false;

	// consume all the (character data) bytes until the first separator
	int iNode = 0;
	while ( p < pCur )
	{
		// in query mode, backslash is an escape char, not part of the token
		if ( bQueryMode && *p == '\\' )
		{
			p++;
			continue;
		}
		iNode = m_pExc->GetNext ( iNode, *p++ );
		if ( iNode < 0 )
			return false;
	}

	const BYTE* pMapEnd = nullptr; // the longest exception found so far is [pStart,pMapEnd)
	const BYTE* pMapTo = nullptr;  // the destination mapping
	bool bHasQueryQuote = false;

	// now, we got ourselves a valid exception prefix, so lets keep consuming more bytes,
	// ie. until further separators, and keep looking for a full exception match
	while ( iNode >= 0 )
	{
		// in query mode, ignore quoting slashes
		if ( bQueryMode && *p == '\\' )
		{
			p++;
			bHasQueryQuote = true;
			continue;
		}

		// decode one more codepoint, check if it is a separator
		bool bSep = true;
		bool bSpace = sphIsSpace ( *p ); // okay despite utf-8, cause hard whitespace is all ascii-7
		const BYTE* q = p;
		if ( p < m_pBufferMax )
			bSep = IsSeparator ( GetLowercaser().ToLower ( sphUTF8Decode ( q ) ), false ); // FIXME? sometimes they ARE first

		// there is a separator ahead, so check if we have a full match
		if ( bSep && m_pExc->GetMapping ( iNode ) )
		{
			pMapEnd = p;
			pMapTo = m_pExc->GetMapping ( iNode );
		}

		// eof? bail
		if ( p >= m_pBufferMax )
			break;

		// not eof? consume those bytes
		if ( bSpace )
		{
			// and fold (hard) whitespace while we're at it!
			// any run of whitespace matches a single ' ' in the trie
			while ( sphIsSpace ( *p ) )
				p++;
			iNode = m_pExc->GetNext ( iNode, ' ' );
		} else
		{
			// just consume the codepoint, byte-by-byte
			while ( p < q && iNode >= 0 )
				iNode = m_pExc->GetNext ( iNode, *p++ );
		}

		// we just consumed a separator, so check for a full match again
		if ( iNode >= 0 && bSep && m_pExc->GetMapping ( iNode ) )
		{
			pMapEnd = p;
			pMapTo = m_pExc->GetMapping ( iNode );
		}
	}

	// found anything?
	if ( !pMapTo )
		return false;

	// commit: copy the mapped text and advance past the matched source span
	strncpy ( (char*)m_sAccum, (char*)const_cast<BYTE*> ( pMapTo ), sizeof ( m_sAccum ) - 1 );
	m_pTokenStart = pStart;
	m_iLastTokenLen = (int)strlen ( (char*)m_sAccum );

	if ( bHasQueryQuote )
	{
		// move backpointer to the head of the quoting sequence
		while ( pMapEnd-1>=pStart && *(pMapEnd-1)=='\\' )
			pMapEnd--;
	}

	m_pCur = pMapEnd;
	m_pTokenEnd = pMapEnd;
	m_bWasSynonym = true;
	return true;
}
/// Finalize the accumulated token: record its length, NUL-terminate the
/// accumulator, and rewind it for the next token.
void CSphTokenizerBase2::FlushAccum()
{
	assert ( m_pAccum - m_sAccum < (int)sizeof ( m_sAccum ) );
	m_iLastTokenLen = m_iAccum;
	*m_pAccum = 0;
	m_iAccum = 0;
	m_pAccum = m_sAccum;
}
| 9,927
|
C++
|
.cpp
| 315
| 28.612698
| 116
| 0.681889
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,963
|
lowercaser.cpp
|
manticoresoftware_manticoresearch/src/tokenizer/lowercaser.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "lowercaser_impl.h"
#include "sphinxstd.h"
#include "std/fnv64.h"
/// Reset the lowercaser to a single zeroed chunk and drop cached clones.
void CSphLowercaser::Reset()
{
	m_dData.Reset ( CHUNK_SIZE );
	m_dData.ZeroVec();
	m_iChunks = 1;
	m_pChunk[0] = m_dData.begin(); // chunk 0 must always be allocated, for utf-8 tokenizer shortcut to work
	for ( int i = 1; i < CHUNK_COUNT; ++i )
		m_pChunk[i] = nullptr;
	InvalidateStoredClones();
}
/// Deep-copy another lowercaser's remap tables into this one.
/// Chunk pointers are rebased from the source buffer to our own copy.
void CSphLowercaser::SetRemap ( const CSphLowercaser* pLC )
{
	if ( !pLC )
		return;

	m_iChunks = pLC->m_iChunks;
	m_dData.Reset ( m_iChunks * CHUNK_SIZE );
	m_dData.CopyFrom ( pLC->m_dData );

	// rebase each non-null chunk pointer by the offset it had in the source
	for ( int i = 0; i < CHUNK_COUNT; ++i )
		m_pChunk[i] = pLC->m_pChunk[i] ? pLC->m_pChunk[i] - pLC->m_dData.begin() + m_dData.begin() : nullptr;
	InvalidateStoredClones();
}
/// Register every byte of szChars as a single-char remap range and push
/// them all through the common AddRemaps() path with the given flags.
void CSphLowercaser::AddChars ( const char* szChars, DWORD uAddFlags, DWORD uResetFlags )
{
	assert ( szChars );
	CSphVector<CSphRemapRange> dRemaps;
	for ( const char* p = szChars; *p; ++p )
		dRemaps.Add ( (CSphRemapRange)*p );
	AddRemaps ( dRemaps, uAddFlags, uResetFlags );
}
bool CSphLowercaser::CheckRemap ( CSphString& sError, const VecTraits_T<CSphRemapRange>& dRemaps, const char* sSource, bool bCanRemap ) const noexcept
{
// check
for ( const CSphRemapRange& r : dRemaps )
{
for ( int j = r.m_iStart; j <= r.m_iEnd; ++j )
if ( ToLower ( j ) )
{
sError.SetSprintf ( "%s characters must not be referenced anywhere else (code=U+%x)", sSource, j );
return false;
}
if ( !bCanRemap )
continue;
for ( int j = r.m_iRemapStart; j <= r.m_iRemapStart + r.m_iEnd - r.m_iStart; ++j )
if ( ToLower ( j ) )
{
sError.SetSprintf ( "%s characters must not be referenced anywhere else (code=U+%x)", sSource, j );
return false;
}
}
return true;
}
/// Apply remap ranges to the lowercaser table, growing the chunk storage as
/// needed. uAddFlags are OR-ed into affected codepoints, uResetFlags cleared.
/// Cached clones are invalidated only if any table entry actually changed.
void CSphLowercaser::AddRemaps ( const VecTraits_T<CSphRemapRange>& dRemaps, DWORD uAddFlags, DWORD uResetFlags )
{
	if ( dRemaps.IsEmpty() )
		return;

	// build new chunks map: which chunks existed, and which become needed now
	enum { was_unused, was_used, is_used_now } dUsed[CHUNK_COUNT];
	for ( int i = 0; i < CHUNK_COUNT; ++i )
		dUsed[i] = m_pChunk[i] ? was_used : was_unused;

	int iNewChunks = m_iChunks;
	for ( const auto& tRemap: dRemaps )
	{
		assert ( tRemap.m_iStart >= 0 && tRemap.m_iStart < MAX_CODE );
		assert ( tRemap.m_iEnd >= 0 && tRemap.m_iEnd < MAX_CODE );
		assert ( tRemap.m_iRemapStart >= 0 && tRemap.m_iRemapStart < MAX_CODE );
		assert ( ( tRemap.m_iRemapStart + tRemap.m_iEnd - tRemap.m_iStart ) >= 0 && ( tRemap.m_iRemapStart + tRemap.m_iEnd - tRemap.m_iStart ) < MAX_CODE );

		for ( int iChunk = ( tRemap.m_iStart >> CHUNK_BITS ); iChunk <= ( tRemap.m_iEnd >> CHUNK_BITS ); ++iChunk )
			if ( dUsed[iChunk] == was_unused )
			{
				dUsed[iChunk] = is_used_now;
				++iNewChunks;
			}
	}

	// alloc new tables and copy, if necessary
	bool bChanged = false;
	if ( iNewChunks > m_iChunks )
	{
		CSphFixedVector<DWORD> dData { iNewChunks * CHUNK_SIZE };
		dData.ZeroVec();

		// lay out all used chunks contiguously in the new buffer
		auto* pChunk = dData.begin();
		for ( int i = 0; i < CHUNK_COUNT; ++i )
		{
			auto* pOldChunk = m_pChunk[i];

			// build new ptr
			if ( dUsed[i] != was_unused )
			{
				m_pChunk[i] = pChunk;
				pChunk += CHUNK_SIZE;
				bChanged = true;
			}

			// copy old data
			if ( dUsed[i] == was_used )
				memcpy ( m_pChunk[i], pOldChunk, CHUNK_BYTES );
		}
		assert ( pChunk - dData.begin() == iNewChunks * CHUNK_SIZE );

		m_dData.SwapData ( dData );
		m_iChunks = iNewChunks;
	}

	// fill new stuff
	for ( const CSphRemapRange &tRemap : dRemaps )
	{
		auto uRemapped = (DWORD)tRemap.m_iRemapStart;
		for ( int j = tRemap.m_iStart; j <= tRemap.m_iEnd; ++j, ++uRemapped )
		{
			assert ( m_pChunk[j >> CHUNK_BITS] );
			auto& uCodepoint = m_pChunk[j >> CHUNK_BITS][j & CHUNK_MASK];
			auto uNew = uRemapped | uAddFlags | ( uCodepoint & MASK_FLAGS );
			uNew &= ~uResetFlags;
			// a previously-mapped codepoint that turns special becomes dual
			if ( ( uCodepoint & MASK_CODEPOINT ) && ( uAddFlags & FLAG_CODEPOINT_SPECIAL ) )
				uNew |= FLAG_CODEPOINT_DUAL;
			bChanged |= uCodepoint!=uNew;
			uCodepoint = uNew;
		}
	}
	if ( bChanged )
		InvalidateStoredClones();
}
int CSphLowercaser::GetMaxCodepointLength() const noexcept
{
DWORD uMax = 0;
for ( auto pChunk : m_pChunk )
if ( pChunk )
for ( int i = 0; i < CHUNK_SIZE; ++i )
{
auto uCode = pChunk[i] & MASK_CODEPOINT;
if ( uMax < uCode )
uMax = uCode;
}
if ( uMax < 0x80 )
return 1;
if ( uMax < 0x800 )
return 2;
return 3; // actually, 4 once we hit 0x10000
}
/// FNV64 hash over the raw remap table; used to detect settings changes.
uint64_t CSphLowercaser::GetFNV() const noexcept
{
	return sphFNV64 ( m_dData );
}
/// Bump the generation counter so the lazily-built Up*Clone() caches
/// are rebuilt on next access.
void CSphLowercaser::InvalidateStoredClones() noexcept
{
	++m_iGeneration;
}
// Lazily rebuild per-purpose lowercaser clones (query / wildcard / exact /
// json combinations). UPCLONESTART opens Up<name>Clone(): it returns early
// if the cached clone matches m_iGeneration, otherwise takes m_tLock,
// re-checks (double-checked locking), and starts a fresh clone seeded from
// this lowercaser. The lines between START and END customize the clone;
// UPCLONEEND publishes it and records the generation.
// NOTE: comments must stay outside the backslash-continued macro lines.
#define UPCLONESTART( name ) \
void CSphLowercaser::Up##name##Clone () const noexcept \
{ \
	if ( m_iGeneration == m_i##name##Gen ) \
		return;\
	\
	ScopedMutex_t _ { m_tLock }; \
	if ( m_iGeneration == m_i##name##Gen ) \
		return; \
	LowercaserRefcountedPtr pLC { new CSphLowercaser }; \
	pLC->SetRemap ( this );

#define UPCLONEEND( name ) \
	m_p##name##LC = pLC.Leak(); \
	m_i##name##Gen = m_iGeneration; \
}

// plain query mode: only the escape char becomes special
UPCLONESTART ( Query )
	pLC->AddChars ( "\\", FLAG_CODEPOINT_SPECIAL );
UPCLONEEND ( Query )

// wildcard + exact-form + json: wildcards ignored, escape and '=' special
UPCLONESTART ( QueryWildExactJson )
	pLC->AddChars ( "*?%", 0, FLAG_CODEPOINT_IGNORE );
	pLC->AddChars ( "\\=", FLAG_CODEPOINT_SPECIAL );
UPCLONEEND ( QueryWildExactJson )

// wildcard + exact-form: full query syntax set becomes special
UPCLONESTART ( QueryWildExact )
	pLC->AddChars ( "*?%", 0, FLAG_CODEPOINT_IGNORE );
	pLC->AddChars ( "\\=()|-!@~\"/^$<", FLAG_CODEPOINT_SPECIAL );
UPCLONEEND ( QueryWildExact )

// wildcard + json
UPCLONESTART ( QueryWildJson )
	pLC->AddChars ( "*?%", 0, FLAG_CODEPOINT_IGNORE );
	pLC->AddChars ( "\\", FLAG_CODEPOINT_SPECIAL );
UPCLONEEND ( QueryWildJson )

// wildcard only
UPCLONESTART ( QueryWild )
	pLC->AddChars ( "*?%", 0, FLAG_CODEPOINT_IGNORE );
	pLC->AddChars ( "\\()|-!@~\"/^$<", FLAG_CODEPOINT_SPECIAL );
UPCLONEEND ( QueryWild )

// exact-form + json
UPCLONESTART ( QueryExactJson )
	pLC->AddChars ( "\\=", FLAG_CODEPOINT_SPECIAL );
UPCLONEEND ( QueryExactJson )

// exact-form only
UPCLONESTART ( QueryExact )
	pLC->AddChars ( "\\=()|-!@~\"/^$<", FLAG_CODEPOINT_SPECIAL );
UPCLONEEND ( QueryExact )

// plain query with full syntax set
UPCLONESTART ( Query_ )
	pLC->AddChars ( "\\()|-!@~\"/^$<", FLAG_CODEPOINT_SPECIAL );
UPCLONEEND ( Query_ )

#undef UPCLONEEND
#undef UPCLONESTART
| 6,631
|
C++
|
.cpp
| 202
| 30.252475
| 150
| 0.666249
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,964
|
charset_definition_parser.cpp
|
manticoresoftware_manticoresearch/src/tokenizer/charset_definition_parser.cpp
|
//
// Copyright (c) 2008-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org
//
//#include "charset_definition_parser.h"
#include "tok_internals.h"
CSphVector<CharsetAlias_t> CSphCharsetDefinitionParser::m_dCharsetAliases;
/// Accessor for the process-wide charset alias table
/// (populated by InitCharsetAliasTable()).
const CSphVector<CharsetAlias_t>& CSphCharsetDefinitionParser::GetCharsetAliases()
{
	return m_dCharsetAliases;
}
/// Return the last parse error message (built by Error()).
CSphString CSphCharsetDefinitionParser::GetLastError()
{
	return (CSphString)m_sError;
}
/// True when the parse cursor sits on the terminating NUL.
bool CSphCharsetDefinitionParser::IsEof()
{
	return *m_pCurrent == '\0';
}
/// Like IsEof(), but also records an "unexpected end of line" error
/// when the input is exhausted. Returns true at EOF.
bool CSphCharsetDefinitionParser::CheckEof()
{
	if ( IsEof() )
	{
		Error ( "unexpected end of line" );
		return true;
	}
	return false;
}
/// Record a parse error with the remaining input as context.
/// Always returns false so callers can 'return Error(...)'.
bool CSphCharsetDefinitionParser::Error ( const char* szMessage )
{
	m_sError.Clear();
	m_sError << szMessage << " near '" << m_pCurrent << "'";
	return false;
}
/// Decode a single hex digit (either case); any non-hex input yields 0.
inline static int HexDigit ( int c )
{
	if ( c >= '0' && c <= '9' )
		return c - '0';

	// fold ASCII letters to lowercase; only 'A'-'F' can land in 'a'-'f'
	const int cFolded = c | 0x20;
	if ( cFolded >= 'a' && cFolded <= 'f' )
		return cFolded - 'a' + 10;

	return 0;
}
/// Advance the parse cursor past any whitespace (stops at the NUL).
void CSphCharsetDefinitionParser::SkipSpaces()
{
	while ( ( *m_pCurrent ) && isspace ( (BYTE)*m_pCurrent ) )
		++m_pCurrent;
}
/// Parse one character code: either 'U+XXXX' hex notation or a single
/// printable ASCII char. Skips trailing whitespace, advances the cursor,
/// and returns the codepoint, or -1 on error (error message recorded).
int CSphCharsetDefinitionParser::ParseCharsetCode()
{
	const char* p = m_pCurrent;
	int iCode = 0;

	if ( p[0] == 'U' && p[1] == '+' )
	{
		// hex form: accumulate all hex digits after "U+"
		p += 2;
		while ( isxdigit ( *p ) )
		{
			iCode = iCode * 16 + HexDigit ( *p++ );
		}
		while ( isspace ( *p ) )
			++p;

	} else
	{
		// literal form: single printable ASCII char only
		if ( ( *(const BYTE*)p ) < 32 || ( *(const BYTE*)p ) > 127 )
		{
			Error ( "non-ASCII characters not allowed, use 'U+00AB' syntax" );
			return -1;
		}

		iCode = *p++;
		while ( isspace ( *p ) )
			++p;
	}

	m_pCurrent = p;
	return iCode;
}
/// Append a remap range to dRanges, tagging it with its insertion order
/// (later definitions win during merge). Rejects destinations below U+20.
/// Returns false and fills pError (if given) on rejection.
bool AddRange ( CSphRemapRange tRange, CSphVector<RemapRangeTagged_t>& dRanges, CSphString* pError )
{
	if ( tRange.m_iRemapStart < 0x20 )
	{
		if ( pError )
			pError->SetSprintf ( "dest range (U+%x) below U+20, not allowed", tRange.m_iRemapStart );
		return false;
	}

	dRanges.Add ( RemapRangeTagged_t { tRange } );
	// tag encodes definition order; used by MergeIntersectedRanges()
	dRanges.Last().m_iTag = dRanges.GetLength();
	return true;
}
/// Member wrapper over the free AddRange(): forwards the range and
/// surfaces any rejection through the parser's error state.
bool CSphCharsetDefinitionParser::AddRange ( CSphRemapRange tRange, CSphVector<RemapRangeTagged_t>& dRanges )
{
	CSphString sError;
	const bool bAdded = ::AddRange ( tRange, dRanges, &sError );
	if ( !bAdded )
		Error ( sError.cstr() );
	return bAdded;
}
// Charsets relocated to folder 'charsets', each one in separate .txt file.
// When you change the content of the folder,
// reconfigure the project with cmake in order to pick the changes.
#include "globalaliases.h"
/// Build the global charset alias table from the compiled-in tables in
/// globalaliases.h: for each alias name, concatenate its NUL-delimited
/// chunk group and parse it into remap ranges. Returns false (with sError)
/// if any bundled charset fails to parse.
bool CSphCharsetDefinitionParser::InitCharsetAliasTable ( CSphString& sError )
{
	m_dCharsetAliases.Reset();
	CSphVector<CharsetAlias_t> dAliases;
	CSphVector<char> dConcat;

	const int iTotalChunks = sizeof ( globalaliases ) / sizeof ( globalaliases[0] );
	int iCurAliasChunk = 0;
	for ( const char* szAliasName : globalaliases_names )
	{
		CharsetAlias_t& tCur = dAliases.Add();
		tCur.m_sName = szAliasName;
		tCur.m_iNameLen = tCur.m_sName.Length();

		// glue this alias's chunk run (terminated by a null entry) into one string
		dConcat.Resize ( 0 );
		while ( iCurAliasChunk < iTotalChunks && globalaliases[iCurAliasChunk] )
		{
			auto iChunkLen = (int)strlen ( globalaliases[iCurAliasChunk] );
			char* szChunk = dConcat.AddN ( iChunkLen );
			memcpy ( szChunk, globalaliases[iCurAliasChunk], iChunkLen );
			++iCurAliasChunk;
		}
		dConcat.Add ( 0 );
		++iCurAliasChunk; // skip the null separator entry

		if ( !sphParseCharset ( dConcat.Begin(), tCur.m_dRemaps, &sError ) )
			return false;
	}

	m_dCharsetAliases.SwapData ( dAliases );
	return true;
}
/// Move the range's source start to iNewStart, shifting the remap start by
/// the same delta so the source->dest mapping stays intact.
/// (std::exchange sets m_iStart to iNewStart and yields the old value.)
void RebaseRange ( CSphRemapRange& dRange, int iNewStart )
{
	dRange.m_iRemapStart = iNewStart + dRange.m_iRemapStart - std::exchange ( dRange.m_iStart, iNewStart );
}
/// Normalize a set of possibly-overlapping remap ranges: split overlaps so
/// that later-defined mappings (higher m_iTag) win on the intersection, drop
/// exact duplicates, then coalesce adjacent ranges with the same shift.
/// Runs the split pass repeatedly until a fixpoint is reached.
void MergeIntersectedRanges ( CSphVector<RemapRangeTagged_t>& dRanges )
{
	// need a stable sort with the desc order of the mappings
	// to keep the last mapping definition and merge into it all next entries (entries defined prior to the last mapping)
	for ( bool bKeepGoing = true; bKeepGoing; )
	{
		bKeepGoing = false;
		// first stage - we flatten all the ranges
		dRanges.Sort();
		CSphVector<RemapRangeTagged_t> dExtraRanges;
		for ( int i = 0; i < dRanges.GetLength() - 1; ++i )
		{
			auto& dFirst = dRanges[i];
			auto& dSecond = dRanges[i+1];
			assert ( dFirst.m_iStart <= dSecond.m_iStart ); // because vec is sorted
			if ( dFirst.m_iEnd < dSecond.m_iStart ) // no intersection, bail
				continue;

			if ( dFirst.m_iStart == dSecond.m_iStart )
			{
				if ( dFirst.m_iEnd == dSecond.m_iEnd )
				{
					assert ( dSecond.m_iTag < dFirst.m_iTag ); // because of sorting order
					dRanges.Remove ( i + 1 ); // ranges are the same - keep one with bigger tag
					--i;
					bKeepGoing = true;
					continue;
				}
				if ( dFirst.m_iEnd > dSecond.m_iEnd )
					std::swap ( dFirst, dSecond );
				assert ( dFirst.m_iEnd < dSecond.m_iEnd );
				// 11
				// 222  => produce 11, 22 and extra tail 2
				dExtraRanges.Add ( dSecond );
				RebaseRange ( dExtraRanges.Last(), dFirst.m_iEnd + 1 );
				dSecond.m_iEnd = dFirst.m_iEnd;
				continue;
			}

			// 111...
			//  22...
			assert ( dFirst.m_iStart < dSecond.m_iStart );
			if ( dFirst.m_iEnd < dSecond.m_iEnd )
			{
				// 111
				//  222 => produce head 1, head 22 and extra middle 11, and tail 2
				dExtraRanges.Add ( dFirst );
				RebaseRange ( dExtraRanges.Last(), dSecond.m_iStart );
				dExtraRanges.Add ( dSecond );
				RebaseRange ( dExtraRanges.Last(), dFirst.m_iEnd + 1 );
				dSecond.m_iEnd = std::exchange ( dFirst.m_iEnd, dSecond.m_iStart - 1 );
				continue;
			}

			if ( dFirst.m_iEnd == dSecond.m_iEnd )
			{
				// 111
				//  22 => produce head 1, and extra middle 11
				dExtraRanges.Add ( dFirst );
				RebaseRange ( dExtraRanges.Last(), dSecond.m_iStart );
				dFirst.m_iEnd = dSecond.m_iStart - 1;
				continue;
			}

			assert ( dFirst.m_iEnd > dSecond.m_iEnd );
			// 1111
			//  22  => produce head 1, extra middle 11 and tail 1
			dExtraRanges.Add ( dFirst );
			dExtraRanges.Last().m_iEnd = dSecond.m_iEnd;
			RebaseRange ( dExtraRanges.Last(), dSecond.m_iStart );
			dExtraRanges.Add ( dFirst );
			RebaseRange ( dExtraRanges.Last(), dSecond.m_iEnd + 1 );
			dFirst.m_iEnd = dSecond.m_iStart - 1;
		}
		// splits found this pass force another pass over the re-sorted set
		dRanges.Append ( dExtraRanges );
		bKeepGoing |= !dExtraRanges.IsEmpty();
	}

	dRanges.Sort();

// paranoid check (disabled): ranges must now be disjoint and well-formed
#define PARANOID 0
#ifndef NDEBUG
#if PARANOID
	for ( int i = 0; i < dRanges.GetLength() - 1; ++i )
	{
		auto& dFirst = dRanges[i];
		auto& dSecond = dRanges[i + 1];
		assert ( dFirst.m_iStart <= dFirst.m_iEnd );
		assert ( dSecond.m_iStart <= dSecond.m_iEnd );
		assert ( dFirst.m_iEnd < dSecond.m_iStart );
	}
#endif
#endif

	// stage 2 - merge sibling ranges. Reuse tag as 'delta'
	for ( auto& dRange : dRanges ) dRange.m_iTag = dRange.m_iRemapStart - dRange.m_iStart;
	for ( int i = 0; i < dRanges.GetLength() - 1; ++i )
	{
		auto& dFirst = dRanges[i];
		auto& dSecond = dRanges[i + 1];
		// adjacent source ranges with the same shift fold into one range
		if ( dFirst.m_iEnd + 1 == dSecond.m_iStart && dFirst.m_iTag == dSecond.m_iTag )
		{
			dFirst.m_iEnd = dSecond.m_iEnd;
			dRanges.Remove ( i + 1 );
			--i;
		}
	}
}
/// Parse a charset_table-style definition string into remap ranges.
/// Comma-separated elements: alias name, single char, char remap 'a->b',
/// range 'a..b', checkerboard range 'a..b/2', or range remap 'a..b->c..d',
/// where chars are printable ASCII or 'U+XXXX'. Overlaps are resolved via
/// MergeIntersectedRanges(); dRanges is always published on exit (even on
/// error, via the scope-exit hook). Returns false with the error recorded.
bool CSphCharsetDefinitionParser::Parse ( const char* sConfig, CSphVector<CSphRemapRange>& dRanges )
{
	m_pCurrent = sConfig;
	dRanges.Reset();
	CSphVector<RemapRangeTagged_t> dOrderedRanges;
	// on every exit path, copy the (tag-stripped) ordered ranges out
	AT_SCOPE_EXIT([&dRanges,&dOrderedRanges]() {
		dRanges.Resize ( dOrderedRanges.GetLength() );
		ARRAY_CONSTFOREACH ( i, dOrderedRanges )
			dRanges[i] = (CSphRemapRange)dOrderedRanges[i]; // slice out m_iTag
	});

	// do parse
	while ( *m_pCurrent )
	{
		SkipSpaces();
		if ( IsEof() )
			break;

		// check for stray comma
		if ( *m_pCurrent == ',' )
			return Error ( "stray ',' not allowed, use 'U+002C' instead" );

		// alias
		bool bGotAlias = false;
		ARRAY_FOREACH_COND ( i, m_dCharsetAliases, !bGotAlias )
		{
			const CharsetAlias_t& tCur = m_dCharsetAliases[i];
			// the alias name must be followed by end-of-input or a comma
			bGotAlias = ( strncmp ( tCur.m_sName.cstr(), m_pCurrent, tCur.m_iNameLen ) == 0 && ( !m_pCurrent[tCur.m_iNameLen] || m_pCurrent[tCur.m_iNameLen] == ',' ) );
			if ( !bGotAlias )
				continue;

			// skip to next definition
			m_pCurrent += tCur.m_iNameLen;
			if ( *m_pCurrent && *m_pCurrent == ',' )
				++m_pCurrent;

			// splice the alias's pre-parsed ranges in
			for ( const auto& dRemap : tCur.m_dRemaps )
			{
				if ( !AddRange ( dRemap, dOrderedRanges ) )
					return false;
			}
		}
		if ( bGotAlias )
			continue;

		// parse char code
		const char* pStart = m_pCurrent;
		int iStart = ParseCharsetCode();
		if ( iStart < 0 )
			return false;

		// stray char?
		if ( !*m_pCurrent || *m_pCurrent == ',' )
		{
			// stray char maps to itself
			if ( !AddRange ( { iStart, iStart, iStart }, dOrderedRanges ) )
				return false;
			if ( IsEof() )
				break;
			++m_pCurrent;
			continue;
		}

		// stray remap?
		if ( m_pCurrent[0] == '-' && m_pCurrent[1] == '>' )
		{
			// parse and add
			m_pCurrent += 2;
			int iDest = ParseCharsetCode();
			if ( iDest < 0 )
				return false;
			if ( !AddRange ( { iStart, iStart, iDest }, dOrderedRanges ) )
				return false;

			// it's either end of line now, or must be followed by comma
			if ( *m_pCurrent )
				if ( *m_pCurrent++ != ',' )
					return Error ( "syntax error" );
			continue;
		}

		// range start?
		if ( !( m_pCurrent[0] == '.' && m_pCurrent[1] == '.' ) )
			return Error ( "syntax error" );
		m_pCurrent += 2;

		SkipSpaces();
		if ( CheckEof() )
			return false;

		// parse range end char code
		int iEnd = ParseCharsetCode();
		if ( iEnd < 0 )
			return false;
		if ( iStart > iEnd )
		{
			m_pCurrent = pStart;
			return Error ( "range end less than range start" );
		}

		// stray range?
		if ( !*m_pCurrent || *m_pCurrent == ',' )
		{
			// identity range a..b
			if ( !AddRange ( { iStart, iEnd, iStart }, dOrderedRanges ) )
				return false;
			if ( IsEof() )
				break;
			++m_pCurrent;
			continue;
		}

		// "checkerboard" range?
		if ( m_pCurrent[0] == '/' && m_pCurrent[1] == '2' )
		{
			// every pair (i, i+1) both map to the second (odd) codepoint
			for ( int i = iStart; i < iEnd; i += 2 )
			{
				if ( !AddRange ( { i, i, i + 1 }, dOrderedRanges ) )
					return false;
				if ( !AddRange ( { i + 1, i + 1, i + 1 }, dOrderedRanges ) )
					return false;
			}

			// skip "/2", expect ","
			m_pCurrent += 2;
			SkipSpaces();
			if ( *m_pCurrent )
				if ( *m_pCurrent++ != ',' )
					return Error ( "expected end of line or ','" );
			continue;
		}

		// remapped range?
		if ( !( m_pCurrent[0] == '-' && m_pCurrent[1] == '>' ) )
			return Error ( "expected end of line, ',' or '-><char>'" );
		m_pCurrent += 2;
		SkipSpaces();
		if ( CheckEof() )
			return false;

		// parse dest start
		const char* pRemapStart = m_pCurrent;
		int iRemapStart = ParseCharsetCode();
		if ( iRemapStart < 0 )
			return false;

		// expect '..'
		if ( CheckEof() )
			return false;
		if ( !( m_pCurrent[0] == '.' && m_pCurrent[1] == '.' ) )
			return Error ( "expected '..'" );
		m_pCurrent += 2;

		// parse dest end
		int iRemapEnd = ParseCharsetCode();
		if ( iRemapEnd < 0 )
			return false;

		// check dest range
		if ( iRemapStart > iRemapEnd )
		{
			m_pCurrent = pRemapStart;
			return Error ( "dest range end less than dest range start" );
		}

		// check for length mismatch
		if ( ( iRemapEnd - iRemapStart ) != ( iEnd - iStart ) )
		{
			m_pCurrent = pStart;
			return Error ( "dest range length must match src range length" );
		}

		// remapped ok
		if ( !AddRange ( { iStart, iEnd, iRemapStart }, dOrderedRanges ) )
			return false;
		if ( IsEof() )
			break;
		if ( *m_pCurrent != ',' )
			return Error ( "expected ','" );
		++m_pCurrent;
	}

	MergeIntersectedRanges ( dOrderedRanges );
	return true;
}
/// public exports
bool sphParseCharset ( const char* szCharset, CSphVector<CSphRemapRange>& dRemaps, CSphString *pError )
{
CSphCharsetDefinitionParser tParser;
if ( tParser.Parse ( szCharset, dRemaps ) )
return true;
if ( pError )
*pError = tParser.GetLastError();
return false;
}
/// Public accessor forwarding to the parser's static alias table.
const CSphVector<CharsetAlias_t>& GetCharsetAliases()
{
	return CSphCharsetDefinitionParser::GetCharsetAliases();
}
/// Public entry point: build the global charset alias table once at startup.
bool sphInitCharsetAliasTable ( CSphString& sError )
{
	return CSphCharsetDefinitionParser::InitCharsetAliasTable ( sError );
}
| 12,338
|
C++
|
.cpp
| 419
| 26.307876
| 159
| 0.653775
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,965
|
tokenizer_multiform.cpp
|
manticoresoftware_manticoresearch/src/tokenizer/tokenizer_multiform.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "token_filter.h"
#include "sphinxstd.h"
#include "charset_definition_parser.h"
#include "sphinxdefs.h"
#include "multiform_container.h"
/// A fully captured tokenizer emission: the token text plus every piece of
/// per-token tokenizer state needed to replay it later (see
/// FillStoredTokenInfo()).
struct StoredToken_t
{
	BYTE m_sToken[3 * SPH_MAX_WORD_LEN + 4];
	// tokenized state
	const char* m_szTokenStart;  // token start in the source buffer
	const char* m_szTokenEnd;    // token end in the source buffer
	const char* m_pBufferPtr;    // tokenizer cursor at capture time
	const char* m_pBufferEnd;    // end of the source buffer
	int m_iTokenLen;             // token length as reported by the tokenizer
	int m_iOvershortCount;       // skipped too-short tokens before this one
	bool m_bBoundary;            // boundary flag at capture time
	bool m_bSpecial;             // token was a special character
	bool m_bBlended;             // token is a blended token
	bool m_bBlendedPart;         // token is a part of a blended token
};
/// Capture the given token text plus the tokenizer's current positional state into tToken.
/// NOTE: strncpy() does not null-terminate when the source is as long as (or longer than)
/// the copy limit, so we terminate explicitly to guarantee m_sToken is a valid C string.
void FillStoredTokenInfo ( StoredToken_t& tToken, const BYTE* sToken, const TokenizerRefPtr_c& pTokenizer )
{
	assert ( sToken && pTokenizer );
	strncpy ( (char*)tToken.m_sToken, (const char*)sToken, sizeof ( tToken.m_sToken ) - 1 );
	tToken.m_sToken[sizeof ( tToken.m_sToken ) - 1] = '\0'; // strncpy may leave the buffer unterminated
	tToken.m_szTokenStart = pTokenizer->GetTokenStart();
	tToken.m_szTokenEnd = pTokenizer->GetTokenEnd();
	tToken.m_iOvershortCount = pTokenizer->GetOvershortCount();
	tToken.m_iTokenLen = pTokenizer->GetLastTokenLen();
	tToken.m_pBufferPtr = pTokenizer->GetBufferPtr();
	tToken.m_pBufferEnd = pTokenizer->GetBufferEnd();
	tToken.m_bBoundary = pTokenizer->GetBoundary();
	tToken.m_bSpecial = pTokenizer->WasTokenSpecial();
	tToken.m_bBlended = pTokenizer->TokenIsBlended();
	tToken.m_bBlendedPart = pTokenizer->TokenIsBlendedPart();
}
/// token filter for multiforms support
///
/// wraps a plain tokenizer and buffers runs of its tokens (m_dStoredTokens) so they
/// can be matched against the multiform container and folded into destination forms;
/// while the buffer is being replayed, all state getters report the state that was
/// captured together with the buffered token rather than the live tokenizer state
class MultiformTokenizer: public CSphTokenFilter
{
	using Base = CSphTokenFilter;

public:
	MultiformTokenizer ( TokenizerRefPtr_c pTokenizer, const CSphMultiformContainer* pContainer );

public:
	void SetBuffer ( const BYTE* sBuffer, int iLength ) final;
	BYTE* GetToken() final;
	/// keep the original (pre-fold) token text around; see GetTokenizedMultiform()
	void EnableTokenizedMultiformTracking() final
	{
		m_bBuildMultiform = true;
	}
	// every getter below prefers the buffered token state when replaying m_dStoredTokens
	int GetLastTokenLen() const noexcept final
	{
		return m_iStart < m_dStoredTokens.GetLength() ? m_dStoredTokens[m_iStart].m_iTokenLen : Base::GetLastTokenLen();
	}
	bool GetBoundary() const noexcept final
	{
		return m_iStart < m_dStoredTokens.GetLength() ? m_dStoredTokens[m_iStart].m_bBoundary : Base::GetBoundary();
	}
	bool WasTokenSpecial() const noexcept final
	{
		return m_iStart < m_dStoredTokens.GetLength() ? m_dStoredTokens[m_iStart].m_bSpecial : Base::WasTokenSpecial();
	}
	int GetOvershortCount() const noexcept final
	{
		return m_iStart < m_dStoredTokens.GetLength() ? m_dStoredTokens[m_iStart].m_iOvershortCount : Base::GetOvershortCount();
	}
	/// returns the tracked source text of the last folded form, or null if none
	BYTE* GetTokenizedMultiform() noexcept final
	{
		return m_sTokenizedMultiform[0] ? m_sTokenizedMultiform : nullptr;
	}
	bool TokenIsBlended() const noexcept final
	{
		return m_iStart < m_dStoredTokens.GetLength() ? m_dStoredTokens[m_iStart].m_bBlended : Base::TokenIsBlended();
	}
	bool TokenIsBlendedPart() const noexcept final
	{
		return m_iStart < m_dStoredTokens.GetLength() ? m_dStoredTokens[m_iStart].m_bBlendedPart : Base::TokenIsBlendedPart();
	}
	int SkipBlended() final;

public:
	TokenizerRefPtr_c Clone ( ESphTokenizerClone eMode ) const noexcept final;
	const char* GetTokenStart() const noexcept final
	{
		return m_iStart < m_dStoredTokens.GetLength() ? m_dStoredTokens[m_iStart].m_szTokenStart : Base::GetTokenStart();
	}
	const char* GetTokenEnd() const noexcept final
	{
		return m_iStart < m_dStoredTokens.GetLength() ? m_dStoredTokens[m_iStart].m_szTokenEnd : Base::GetTokenEnd();
	}
	const char* GetBufferPtr() const noexcept final
	{
		return m_iStart < m_dStoredTokens.GetLength() ? m_dStoredTokens[m_iStart].m_pBufferPtr : Base::GetBufferPtr();
	}
	void SetBufferPtr ( const char* sNewPtr ) final;
	uint64_t GetSettingsFNV() const noexcept final;
	bool WasTokenMultiformDestination ( bool& bHead, int& iDestCount ) const noexcept final;

private:
	const CSphMultiformContainer* m_pMultiWordforms;            // shared multiform dictionary (not owned)
	int m_iStart = 0;                                           // index of the current token in m_dStoredTokens
	int m_iOutputPending = -1;                                  // next destination-form token to emit, -1 if idle
	const CSphMultiform* m_pCurrentForm = nullptr;              // multi-token form currently being emitted
	bool m_bBuildMultiform = false;                             // track original text of folded forms?
	BYTE m_sTokenizedMultiform[3 * SPH_MAX_WORD_LEN + 4];       // original (pre-fold) text, space-separated
	CSphVector<StoredToken_t> m_dStoredTokens;                  // look-ahead token buffer
};
//////////////////////////////////////////////////////////////////////////
/// Wrap pTokenizer with multiform folding driven by pContainer (must be non-null).
MultiformTokenizer::MultiformTokenizer ( TokenizerRefPtr_c pTokenizer, const CSphMultiformContainer* pContainer )
	: CSphTokenFilter ( std::move (pTokenizer) )
	, m_pMultiWordforms ( pContainer )
{
	assert ( pContainer );
	m_sTokenizedMultiform[0] = '\0';
	// room for the longest known form plus a handful of trailing blended tokens
	m_dStoredTokens.Reserve ( pContainer->m_iMaxTokens + 6 );
}
/// Produce the next token, folding matching token runs into their multiform destinations.
/// Returns a pointer into internal storage (valid until the next call), or null at end of input.
BYTE* MultiformTokenizer::GetToken()
{
	// phase 1: still emitting a multi-token destination form? return its next token
	if ( m_iOutputPending > -1 && m_pCurrentForm )
	{
		if ( ++m_iOutputPending >= m_pCurrentForm->m_dNormalForm.GetLength() )
		{
			// destination form fully emitted; fall through to regular processing
			m_iOutputPending = -1;
			m_pCurrentForm = nullptr;
		} else
		{
			StoredToken_t& tStart = m_dStoredTokens[m_iStart];
			strncpy ( (char*)tStart.m_sToken, m_pCurrentForm->m_dNormalForm[m_iOutputPending].m_sForm.cstr(), sizeof ( tStart.m_sToken ) - 1 );
			tStart.m_sToken[sizeof ( tStart.m_sToken ) - 1] = '\0'; // strncpy does not guarantee termination
			tStart.m_iTokenLen = m_pCurrentForm->m_dNormalForm[m_iOutputPending].m_iLengthCP;
			tStart.m_bBoundary = false;
			tStart.m_bSpecial = false;
			tStart.m_bBlended = false;
			tStart.m_bBlendedPart = false;
			return tStart.m_sToken;
		}
	}

	m_sTokenizedMultiform[0] = '\0';

	// phase 2: advance in the buffer; refill it from the wrapped tokenizer when exhausted
	m_iStart++;
	if ( m_iStart >= m_dStoredTokens.GetLength() )
	{
		m_iStart = 0;
		m_dStoredTokens.Resize ( 0 );
		const BYTE* pToken = CSphTokenFilter::GetToken();
		if ( !pToken )
			return nullptr;
		FillStoredTokenInfo ( m_dStoredTokens.Add(), pToken, m_pTokenizer );
		// pull in the whole run of blended tokens as well
		while ( m_dStoredTokens.Last().m_bBlended || m_dStoredTokens.Last().m_bBlendedPart )
		{
			pToken = CSphTokenFilter::GetToken();
			if ( !pToken )
				break;
			FillStoredTokenInfo ( m_dStoredTokens.Add(), pToken, m_pTokenizer );
		}
	}

	CSphMultiforms** pWordforms = nullptr;
	int iTokensGot = 1;
	bool bBlended = false;

	// check multi-form
	// only blended parts checked for multi-form with blended
	// in case ALL blended parts got transformed primary blended got replaced by normal form
	// otherwise blended tokens provided as is
	if ( m_dStoredTokens[m_iStart].m_bBlended || m_dStoredTokens[m_iStart].m_bBlendedPart )
	{
		if ( m_dStoredTokens[m_iStart].m_bBlended && m_iStart + 1 < m_dStoredTokens.GetLength() && m_dStoredTokens[m_iStart + 1].m_bBlendedPart )
		{
			pWordforms = m_pMultiWordforms->m_Hash ( (const char*)m_dStoredTokens[m_iStart + 1].m_sToken );
			if ( pWordforms )
			{
				bBlended = true;
				for ( int i = m_iStart + 2; i < m_dStoredTokens.GetLength(); i++ )
				{
					// break out on blended over or got completely different blended
					if ( m_dStoredTokens[i].m_bBlended || !m_dStoredTokens[i].m_bBlendedPart )
						break;
					iTokensGot++;
				}
			}
		}
	} else
	{
		pWordforms = m_pMultiWordforms->m_Hash ( (const char*)m_dStoredTokens[m_iStart].m_sToken );
		if ( pWordforms )
		{
			int iTokensNeed = ( *pWordforms )->m_iMaxTokens + 1;
			int iCur = m_iStart;
			bool bGotBlended = false;

			// collect up ahead to multi-form tokens or all blended tokens or phrase starts or phrase ends
			while ( ( iTokensGot<iTokensNeed || bGotBlended ) && m_dStoredTokens.Last().m_sToken[0]!='"' )
			{
				iCur++;
				if ( iCur >= m_dStoredTokens.GetLength() )
				{
					// fetch next token
					const BYTE* pToken = CSphTokenFilter::GetToken();
					if ( !pToken )
						break;
					FillStoredTokenInfo ( m_dStoredTokens.Add(), pToken, m_pTokenizer );
				}
				bool bCurBleneded = ( m_dStoredTokens[iCur].m_bBlended || m_dStoredTokens[iCur].m_bBlendedPart );
				if ( bGotBlended && !bCurBleneded )
					break;
				bGotBlended = bCurBleneded;
				// count only regular tokens; can not fold mixed (regular+blended) tokens to form
				iTokensGot += ( bGotBlended ? 0 : 1 );
			}
		}
	}

	// not enough tokens collected for any known form? pass the current token through
	if ( !pWordforms || iTokensGot < ( *pWordforms )->m_iMinTokens + 1 )
		return m_dStoredTokens[m_iStart].m_sToken;

	int iStartToken = m_iStart + ( bBlended ? 1 : 0 );
	for ( const auto& pCurForm : ( *pWordforms )->m_pForms )
	{
		int iFormTokCount = pCurForm->m_dTokens.GetLength();
		if ( iTokensGot < iFormTokCount + 1 || ( bBlended && iTokensGot != iFormTokCount + 1 ) )
			continue;

		// compare buffered tokens against the form's source tokens, case-insensitively
		int iForm = 0;
		for ( ; iForm < iFormTokCount; iForm++ )
		{
			const StoredToken_t& tTok = m_dStoredTokens[iStartToken + 1 + iForm];
			const char* szStored = (const char*)tTok.m_sToken;
			const char* szNormal = pCurForm->m_dTokens[iForm].cstr();
			if ( *szNormal != *szStored || strcasecmp ( szNormal, szStored ) )
				break;
		}

		// early out - no destination form detected
		if ( iForm != iFormTokCount )
			continue;

		// tokens after folded form are valid tail that should be processed next time
		if ( m_bBuildMultiform )
		{
			BYTE* pOut = m_sTokenizedMultiform;
			BYTE* pMax = pOut + sizeof ( m_sTokenizedMultiform );
			for ( int j = 0; j < iFormTokCount + 1 && pOut < pMax; j++ )
			{
				const StoredToken_t& tTok = m_dStoredTokens[iStartToken + j];
				const BYTE* sTok = tTok.m_sToken;
				if ( j && pOut < pMax )
					*pOut++ = ' ';
				while ( *sTok && pOut < pMax )
					*pOut++ = *sTok++;
			}
			// FIXED: old code wrote *pOut unconditionally, one byte past the buffer when it filled up
			if ( pOut < pMax )
				*pOut = '\0';
			*( pMax - 1 ) = '\0'; // force-terminate in case the buffer was filled completely
		}

		if ( !bBlended )
		{
			// fold regular tokens to form
			const StoredToken_t& tStart = m_dStoredTokens[m_iStart];
			StoredToken_t& tEnd = m_dStoredTokens[m_iStart + iFormTokCount];
			m_iStart += iFormTokCount;
			strncpy ( (char*)tEnd.m_sToken, pCurForm->m_dNormalForm[0].m_sForm.cstr(), sizeof ( tEnd.m_sToken ) - 1 );
			tEnd.m_sToken[sizeof ( tEnd.m_sToken ) - 1] = '\0'; // strncpy does not guarantee termination
			tEnd.m_szTokenStart = tStart.m_szTokenStart;
			tEnd.m_iTokenLen = pCurForm->m_dNormalForm[0].m_iLengthCP;
			tEnd.m_bBoundary = false;
			tEnd.m_bSpecial = false;
			tEnd.m_bBlended = false;
			tEnd.m_bBlendedPart = false;
			// multi-token destination? schedule the rest for the next GetToken() calls
			if ( pCurForm->m_dNormalForm.GetLength() > 1 )
			{
				m_iOutputPending = 0;
				m_pCurrentForm = pCurForm;
			}
		} else
		{
			// replace blended by form
			// FIXME: add multiple destination token support here (if needed)
			assert ( pCurForm->m_dNormalForm.GetLength() == 1 );
			StoredToken_t& tDst = m_dStoredTokens[m_iStart];
			strncpy ( (char*)tDst.m_sToken, pCurForm->m_dNormalForm[0].m_sForm.cstr(), sizeof ( tDst.m_sToken ) - 1 );
			tDst.m_sToken[sizeof ( tDst.m_sToken ) - 1] = '\0'; // strncpy does not guarantee termination
			tDst.m_iTokenLen = pCurForm->m_dNormalForm[0].m_iLengthCP;
		}
		break;
	}
	return m_dStoredTokens[m_iStart].m_sToken;
}
/// Clone the wrapped tokenizer, then wrap the clone into a fresh multiform filter.
TokenizerRefPtr_c MultiformTokenizer::Clone ( ESphTokenizerClone eMode ) const noexcept
{
	TokenizerRefPtr_c pCloned = m_pTokenizer->Clone ( eMode );
	Tokenizer::AddToMultiformFilterTo ( pCloned, m_pMultiWordforms );
	return pCloned;
}
/// Reposition the input cursor; drops all buffered tokens and any pending form output.
void MultiformTokenizer::SetBufferPtr ( const char* sNewPtr )
{
	m_dStoredTokens.Resize ( 0 );
	m_pCurrentForm = nullptr;
	m_iOutputPending = -1;
	m_iStart = 0;
	CSphTokenFilter::SetBufferPtr ( sNewPtr );
}
/// Attach a new input buffer; also resets buffered multiform state via SetBufferPtr().
void MultiformTokenizer::SetBuffer ( const BYTE* sBuffer, int iLength )
{
	CSphTokenFilter::SetBuffer ( sBuffer, iLength );
	SetBufferPtr ( (const char*)sBuffer ); // clears stored tokens and pending form output
}
/// Settings hash: mix the wordforms container identity into the parent filter hash.
uint64_t MultiformTokenizer::GetSettingsFNV() const noexcept
{
	return CSphTokenFilter::GetSettingsFNV() ^ (uint64_t)m_pMultiWordforms;
}
int MultiformTokenizer::SkipBlended()
{
bool bGotBlended = ( m_iStart < m_dStoredTokens.GetLength() && ( m_dStoredTokens[m_iStart].m_bBlended || m_dStoredTokens[m_iStart].m_bBlendedPart ) );
if ( !bGotBlended )
return 0;
int iWasStart = m_iStart;
for ( int iTok = m_iStart + 1; iTok < m_dStoredTokens.GetLength() && m_dStoredTokens[iTok].m_bBlendedPart && !m_dStoredTokens[iTok].m_bBlended; iTok++ )
m_iStart = iTok;
return m_iStart - iWasStart;
}
/// True while emitting a multi-token destination form; reports whether the last
/// token was the head of the form and how many destination tokens there are.
bool MultiformTokenizer::WasTokenMultiformDestination ( bool& bHead, int& iDestCount ) const noexcept
{
	if ( m_iOutputPending <= -1 || !m_pCurrentForm )
		return false;
	const int iForms = m_pCurrentForm->m_dNormalForm.GetLength();
	if ( iForms <= 1 || m_iOutputPending >= iForms )
		return false;
	bHead = ( m_iOutputPending == 0 );
	iDestCount = iForms;
	return true;
}
/// Wrap pTokenizer with a multiform filter when a container is configured;
/// a null container leaves the tokenizer chain untouched.
void Tokenizer::AddToMultiformFilterTo ( TokenizerRefPtr_c& pTokenizer, const CSphMultiformContainer* pContainer )
{
	if ( !pContainer )
		return;
	pTokenizer = new MultiformTokenizer ( std::move ( pTokenizer ), pContainer );
}
| 12,311
|
C++
|
.cpp
| 332
| 34.243976
| 160
| 0.716885
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,966
|
tokenizer.cpp
|
manticoresoftware_manticoresearch/src/tokenizer/tokenizer.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "tokenizer.h"
#include "sphinxstd.h"
#include "sphinxint.h"
#include "charset_definition_parser.h"
#include "lowercaser_impl.h"
/////////////////////////////////////////////////////////////////////////////
/// Apply a charset_table definition: parse it, clamp ranges to the valid codepoint
/// window, and install the result into the staging lowercaser.
bool ISphTokenizer::SetCaseFolding ( const char * sConfig, CSphString & sError )
{
	CSphVector<CSphRemapRange> dRemaps;
	if ( !sphParseCharset ( sConfig, dRemaps, &sError ) )
		return false;

	// clamp source/target codepoints into [MIN_CODE, MAX_CODE); a remap whose
	// computed end falls outside the window is dropped entirely
	const int MIN_CODE = 0x21;
	ARRAY_FOREACH ( i, dRemaps )
	{
		CSphRemapRange & tMap = dRemaps[i];

		if ( tMap.m_iStart<MIN_CODE || tMap.m_iStart>= CSphLowercaser::MAX_CODE )
		{
			sphWarning ( "wrong character mapping start specified: U+%x, should be between U+%x and U+%x (inclusive); CLAMPED", tMap.m_iStart, MIN_CODE, CSphLowercaser::MAX_CODE-1 );
			tMap.m_iStart = Min ( Max ( tMap.m_iStart, MIN_CODE ), CSphLowercaser::MAX_CODE-1 );
		}

		if ( tMap.m_iEnd<MIN_CODE || tMap.m_iEnd>= CSphLowercaser::MAX_CODE)
		{
			sphWarning ( "wrong character mapping end specified: U+%x, should be between U+%x and U+%x (inclusive); CLAMPED", tMap.m_iEnd, MIN_CODE, CSphLowercaser::MAX_CODE-1 );
			tMap.m_iEnd = Min ( Max ( tMap.m_iEnd, MIN_CODE ), CSphLowercaser::MAX_CODE-1 );
		}

		if ( tMap.m_iRemapStart<MIN_CODE || tMap.m_iRemapStart>= CSphLowercaser::MAX_CODE )
		{
			sphWarning ( "wrong character remapping start specified: U+%x, should be between U+%x and U+%x (inclusive); CLAMPED", tMap.m_iRemapStart, MIN_CODE, CSphLowercaser::MAX_CODE-1 );
			tMap.m_iRemapStart = Min ( Max ( tMap.m_iRemapStart, MIN_CODE ), CSphLowercaser::MAX_CODE-1 );
		}

		int iRemapEnd = tMap.m_iRemapStart+tMap.m_iEnd-tMap.m_iStart;
		if ( iRemapEnd<MIN_CODE || iRemapEnd>= CSphLowercaser::MAX_CODE )
		{
			sphWarning ( "wrong character remapping end specified: U+%x, should be between U+%x and U+%x (inclusive); IGNORED", iRemapEnd, MIN_CODE, CSphLowercaser::MAX_CODE-1 );
			// remove the offending range and re-test the element shifted into slot i
			dRemaps.Remove(i);
			--i;
		}
	}

	// rebuild the staging lowercaser from the validated ranges
	auto& tLC = StagingLowercaser();
	tLC.Reset ();
	tLC.AddRemaps ( dRemaps );
	return true;
}
/// Add plain (indexable) characters to the staging lowercaser.
void ISphTokenizer::AddPlainChars ( const char* szChars )
{
	StagingLowercaser().AddChars ( szChars );
}
/// Add special characters (tokenized as standalone specials) to the staging lowercaser.
void ISphTokenizer::AddSpecials ( const char * sSpecials )
{
	StagingLowercaser().AddChars ( sSpecials, FLAG_CODEPOINT_SPECIAL );
}
/// Store a copy of the tokenizer settings; no validation is performed here.
void ISphTokenizer::Setup ( const CSphTokenizerSettings & tSettings )
{
	m_tSettings = tSettings;
}
/// Lazily create (or return) the mutable "staging" lowercaser.
/// Concurrent callers may race here: each builds a candidate instance and the CAS
/// decides the winner; losers observe the installed pointer and return it instead.
CSphLowercaser& ISphTokenizer::StagingLowercaser()
{
	LowercaserRefcountedPtr pNewCreatedLC;
	CSphLowercaser* pStagingLC = m_pStagingLC.load ( std::memory_order_relaxed );
	do {
		// already installed (by us earlier, or by a racing thread)? use it
		if ( pStagingLC )
			return *pStagingLC;
		// build a fresh candidate, seeding it from the current read-only LC if any
		pNewCreatedLC = new CSphLowercaser;
		if ( m_pLC )
			pNewCreatedLC->SetRemap ( m_pLC );
	} while ( !m_pStagingLC.compare_exchange_weak ( pStagingLC, pNewCreatedLC, std::memory_order_relaxed ) );
	// we won the race: the staging instance also becomes the current lowercaser
	m_pLC = pNewCreatedLC;
	return *pNewCreatedLC;
}
/// Return the current (read-only) lowercaser; must have been set beforehand.
LowercaserRefcountedConstPtr ISphTokenizer::GetLC() const noexcept
{
	assert ( m_pLC );
	return m_pLC;
}
/// Install a shared read-only lowercaser (one-shot: the slot must be empty).
void ISphTokenizer::SetLC ( LowercaserRefcountedConstPtr rhs )
{
	assert ( !m_pLC );
	m_pLC = std::move ( rhs );
	m_pStagingLC.store ( nullptr, std::memory_order_relaxed ); // we NEVER const_cast LC. That is critically important!
}
/// Register SPZ (sentence/paragraph/zone) special characters, rejecting any that
/// collide with the ngram/boundary/ignore character classes.
bool ISphTokenizer::AddSpecialsSPZ ( const char* sSpecials, const char* sDirective, CSphString& sError )
{
	const auto uConflictMask = ( FLAG_CODEPOINT_NGRAM | FLAG_CODEPOINT_BOUNDARY | FLAG_CODEPOINT_IGNORE );
	for ( const char* p = sSpecials; *p; ++p )
	{
		if ( !( m_pLC->ToLower ( *p ) & uConflictMask ) )
			continue;
		sError.SetSprintf ( "%s requires that character '%c' is not in ngram_chars, phrase_boundary, or ignore_chars", sDirective, *p );
		return false;
	}
	AddSpecials ( sSpecials );
	return true;
}
/// Enable sentence (index_sp) indexing: registers '.', '?', '!' and the magic
/// paragraph code as specials, then turns sentence detection on.
bool ISphTokenizer::EnableSentenceIndexing ( CSphString& sError )
{
	constexpr char sSpecials[] = { '.', '?', '!', MAGIC_CODE_PARAGRAPH, '\0' };
	if ( !AddSpecialsSPZ ( sSpecials, "index_sp", sError ) )
		return false;
	m_bDetectSentences = true;
	return true;
}
/// Enable zone (index_zones) indexing: registers the magic zone code as a special.
bool ISphTokenizer::EnableZoneIndexing ( CSphString& sError )
{
	constexpr char sSpecials[] = { MAGIC_CODE_ZONE, '\0' };
	return AddSpecialsSPZ ( sSpecials, "index_zones", sError );
}
uint64_t ISphTokenizer::GetSettingsFNV() const noexcept
{
uint64_t uHash = GetLowercaser().GetFNV();
DWORD uFlags = 0;
if ( m_bBlendSkipPure )
uFlags |= 1 << 1;
if ( m_bShortTokenFilter )
uFlags |= 1 << 2;
uHash = sphFNV64 ( &uFlags, sizeof ( uFlags ), uHash );
uHash = sphFNV64 ( &m_uBlendVariants, sizeof ( m_uBlendVariants ), uHash );
uHash = sphFNV64 ( &m_tSettings.m_iType, sizeof ( m_tSettings.m_iType ), uHash );
uHash = sphFNV64 ( &m_tSettings.m_iMinWordLen, sizeof ( m_tSettings.m_iMinWordLen ), uHash );
uHash = sphFNV64 ( &m_tSettings.m_iNgramLen, sizeof ( m_tSettings.m_iNgramLen ), uHash );
if ( !m_tSynFileInfo.m_sFilename.IsEmpty() )
uHash = sphFNV64 ( m_tSynFileInfo.m_sFilename.cstr(), m_tSynFileInfo.m_sFilename.Length(), uHash );
return uHash;
}
/// Parse a user-supplied charset description, validate it against the current
/// lowercaser, and apply the remaps with the requested codepoint flags.
bool ISphTokenizer::RemapCharacters ( const char* sConfig, DWORD uFlags, const char* sSource, bool bCanRemap, CSphString& sError )
{
	CSphVector<CSphRemapRange> dRemaps;
	const bool bOk = sphParseCharset ( sConfig, dRemaps, &sError )
		&& m_pLC->CheckRemap ( sError, dRemaps, sSource, bCanRemap );
	if ( !bOk )
		return false;

	// validated; install the mapping into the staging lowercaser
	StagingLowercaser().AddRemaps ( dRemaps, uFlags );
	return true;
}
/// Register phrase-boundary characters (phrase_boundary directive).
bool ISphTokenizer::SetBoundary ( const char* sConfig, CSphString& sError )
{
	return RemapCharacters ( sConfig, FLAG_CODEPOINT_BOUNDARY, "phrase boundary", false, sError );
}
/// Register ignored characters (ignore_chars directive).
bool ISphTokenizer::SetIgnoreChars ( const char* sConfig, CSphString& sError )
{
	return RemapCharacters ( sConfig, FLAG_CODEPOINT_IGNORE, "ignored", false, sError );
}
/// Register blended characters (blend_chars directive); a null config returns false.
bool ISphTokenizer::SetBlendChars ( const char* sConfig, CSphString& sError )
{
	if ( !sConfig )
		return false;
	return RemapCharacters ( sConfig, FLAG_CODEPOINT_BLEND, "blend", true, sError );
}
/// Compare an explicitly-sized token against a null-terminated reference keyword;
/// true only when both length and bytes match exactly.
static bool sphStrncmp ( const char* sCheck, int iCheck, const char* sRef )
{
	if ( iCheck != (int)strlen ( sRef ) )
		return false;
	return memcmp ( sCheck, sRef, iCheck ) == 0;
}
/// Parse the blend_mode directive (comma/space separated option list) into
/// m_uBlendVariants / m_bBlendSkipPure. Returns false with sError set on an
/// unknown option or when no indexing variant is enabled.
bool ISphTokenizer::SetBlendMode ( const char* sMode, CSphString& sError )
{
	// empty mode resets to the default: index the blended token as-is only
	if ( !sMode || !*sMode )
	{
		m_uBlendVariants = BLEND_TRIM_NONE;
		m_bBlendSkipPure = false;
		return true;
	}

	m_uBlendVariants = 0;
	const char* p = sMode;
	while ( *p )
	{
		// skip separators; FIXED: also stop at the terminator — the old loop could
		// scan past the end of the string when trailing non-alpha characters were present
		while ( *p && !sphIsAlpha ( *p ) )
			p++;
		if ( !*p )
			break;

		// collect one option token
		const char* sTok = p;
		while ( sphIsAlpha ( *p ) )
			p++;
		int iLen = int ( p - sTok );

		// map the option name onto its flag
		if ( sphStrncmp ( sTok, iLen, "trim_none" ) )
			m_uBlendVariants |= BLEND_TRIM_NONE;
		else if ( sphStrncmp ( sTok, iLen, "trim_head" ) )
			m_uBlendVariants |= BLEND_TRIM_HEAD;
		else if ( sphStrncmp ( sTok, iLen, "trim_tail" ) )
			m_uBlendVariants |= BLEND_TRIM_TAIL;
		else if ( sphStrncmp ( sTok, iLen, "trim_both" ) )
			m_uBlendVariants |= BLEND_TRIM_BOTH;
		else if ( sphStrncmp ( sTok, iLen, "trim_all" ) )
			m_uBlendVariants |= BLEND_TRIM_ALL;
		else if ( sphStrncmp ( sTok, iLen, "skip_pure" ) )
			m_bBlendSkipPure = true;
		else
		{
			sError.SetSprintf ( "unknown blend_mode option near '%s'", sTok );
			return false;
		}
	}

	// at least one indexing variant must remain enabled
	if ( !m_uBlendVariants )
	{
		sError.SetSprintf ( "blend_mode must define at least one variant to index" );
		m_uBlendVariants = BLEND_TRIM_NONE;
		m_bBlendSkipPure = false;
		return false;
	}
	return true;
}
/// Factory: build a tokenizer from settings, applying charset table, synonyms,
/// boundaries, ignore/blend chars, blend mode and ngram setup in order.
/// Returns an empty ref-ptr with sError set on any failure.
TokenizerRefPtr_c Tokenizer::Create ( const CSphTokenizerSettings & tSettings, const CSphEmbeddedFiles * pFiles, FilenameBuilder_i * pFilenameBuilder, StrVec_t & dWarnings, CSphString & sError )
{
	TokenizerRefPtr_c pResult;
	TokenizerRefPtr_c pTokenizer;

	// pick the base implementation by charset type
	switch ( tSettings.m_iType )
	{
	case TOKENIZER_UTF8: pTokenizer = Tokenizer::Detail::CreateUTF8Tokenizer ( tSettings.m_sCaseFolding.IsEmpty() ); break;
	case TOKENIZER_NGRAM: pTokenizer = Tokenizer::Detail::CreateUTF8NgramTokenizer ( tSettings.m_sCaseFolding.IsEmpty() ); break;
	default:
		sError.SetSprintf ( "failed to create tokenizer (unknown charset type '%d')", tSettings.m_iType );
		return pResult;
	}

	pTokenizer->Setup ( tSettings );

	// each configuration step below wraps its own error with the directive name
	if ( !tSettings.m_sCaseFolding.IsEmpty () && !pTokenizer->SetCaseFolding ( tSettings.m_sCaseFolding.cstr (), sError ) )
	{
		sError.SetSprintf ( "'charset_table': %s", sError.cstr() );
		return pResult;
	}

	CSphString sSynonymsFile = tSettings.m_sSynonymsFile;
	if ( !sSynonymsFile.IsEmpty() )
	{
		// resolve the synonyms path relative to the index location when possible
		if ( pFilenameBuilder )
			sSynonymsFile = pFilenameBuilder->GetFullPath(sSynonymsFile);

		if ( !pTokenizer->LoadSynonyms ( sSynonymsFile.cstr(), pFiles && pFiles->m_bEmbeddedSynonyms ? pFiles : nullptr, dWarnings, sError ) )
		{
			sError.SetSprintf ( "'synonyms': %s", sError.cstr() );
			return pResult;
		}
	}

	if ( !tSettings.m_sBoundary.IsEmpty () && !pTokenizer->SetBoundary ( tSettings.m_sBoundary.cstr (), sError ) )
	{
		sError.SetSprintf ( "'phrase_boundary': %s", sError.cstr() );
		return pResult;
	}

	if ( !tSettings.m_sIgnoreChars.IsEmpty () && !pTokenizer->SetIgnoreChars ( tSettings.m_sIgnoreChars.cstr (), sError ) )
	{
		sError.SetSprintf ( "'ignore_chars': %s", sError.cstr() );
		return pResult;
	}

	if ( !tSettings.m_sBlendChars.IsEmpty () && !pTokenizer->SetBlendChars ( tSettings.m_sBlendChars.cstr (), sError ) )
	{
		sError.SetSprintf ( "'blend_chars': %s", sError.cstr() );
		return pResult;
	}

	if ( !pTokenizer->SetBlendMode ( tSettings.m_sBlendMode.cstr (), sError ) )
	{
		sError.SetSprintf ( "'blend_mode': %s", sError.cstr() );
		return pResult;
	}

	pTokenizer->SetNgramLen ( tSettings.m_iNgramLen );
	if ( !tSettings.m_sNgramChars.IsEmpty () && !pTokenizer->SetNgramChars ( tSettings.m_sNgramChars.cstr (), sError ) )
	{
		sError.SetSprintf ( "'ngram_chars': %s", sError.cstr() );
		return pResult;
	}

	pResult = std::move (pTokenizer);
	return pResult;
}
| 10,143
|
C++
|
.cpp
| 265
| 35.85283
| 194
| 0.70973
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,967
|
exceptions_trie.cpp
|
manticoresoftware_manticoresearch/src/tokenizer/exceptions_trie.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "exceptions_trie.h"
#include "sphinxstd.h"
#include "fileio.h"
#include "sphinxutils.h"
#include "sphinxjson.h"
#include "tok_internals.h"
/////////////////////////////////////////////////////////////////////////////
// TOKENIZING EXCEPTIONS
/////////////////////////////////////////////////////////////////////////////
using WriterTrie_fn = std::function<void (const char*)>;
/// Helper state for exporting an exceptions trie back into textual "from => to" lines.
/// Walks the serialized trie blob recursively, accumulating the current prefix.
struct WriterTrie_t
{
	WriterTrie_t ( const VecTraits_T<BYTE> & dData, int iMappings, WriterTrie_fn && fnWrite )
		: m_dData ( dData )
		, m_iMappings ( iMappings )
		, m_fnWrite ( std::move ( fnWrite ) )
	{
	}

	const VecTraits_T<BYTE> & m_dData;  // serialized trie blob (nodes + output strings)
	int m_iMappings;                    // offset where output strings begin in m_dData
	GtEscapedBuilder m_sLine;           // scratch builder for one exported line
	int m_iCount = 0;                   // number of mappings written so far
	CSphVector<BYTE> m_dPrefix;         // bytes of the current trie path (the "from" part)
	WriterTrie_fn m_fnWrite;            // sink that receives each formatted line
	bool m_bAddNL = false;              // append '\n' after each line?

	void Write ( int iNode );
};
/// exceptions trie, stored in a tidy simple blob
/// we serialize each trie node as follows:
///
/// int result_offset, 0 if no output mapping
/// BYTE num_bytes, 0 if no further valid bytes can be accepted
/// BYTE values[num_bytes], known accepted byte values
/// BYTE offsets[num_bytes], and the respective next node offsets
///
/// output mappings themselves are serialized just after the nodes,
/// as plain old ASCIIZ strings
/// Recursively walk the serialized trie starting at byte offset iNode,
/// emitting one "prefix => mapping" line for every node with an output.
void WriterTrie_t::Write ( int iNode )
{
	assert ( iNode>=0 && iNode<m_iMappings );
	const BYTE * p = m_dData.Begin() + iNode;

	// first node field: output string offset (0 means no output mapping here)
	// NOTE: potentially unaligned int read from the packed blob
	int iTo = *(int*)const_cast<BYTE*> ( p );
	if ( iTo>0 )
	{
		m_sLine.Clear();
		const char * sTo = (const char *)m_dData.Begin() + iTo;
		m_sLine.AppendEscapedWithCommaNoQuotes ( (char*)m_dPrefix.Begin(), m_dPrefix.GetLength() );
		m_sLine.Appendf ( " => %s", sTo );
		if ( m_bAddNL )
			m_sLine.Appendf ( "\n" );
		m_fnWrite ( m_sLine.cstr() );
		m_iCount++;
	}

	// second field: child count; then child byte values, then child offsets
	int n = p[4];
	if ( n==0 )
		return;

	p += 5;
	for ( int i=0; i<n; i++ )
	{
		m_dPrefix.Add ( p[i] );
		int iChild = *(int*)&p[n + 4 * i]; // unaligned read of child offset
		Write ( iChild );
		m_dPrefix.Pop();
	}
}
/// Export the trie into a binary writer: mapping count first,
/// then every "from => to\n" line produced by a recursive walk from the root.
void ExceptionsTrie_c::Export ( Writer_i & tWr ) const
{
	WriterTrie_fn fnPut = [&tWr] ( const char * szLine ) { tWr.PutString ( szLine ); };
	WriterTrie_t tDataWriter ( m_dData, m_iMappings, std::move ( fnPut ) );
	tDataWriter.m_bAddNL = true;
	tWr.PutDword ( m_iCount );
	tDataWriter.Write ( 0 );
	assert ( tDataWriter.m_iCount==m_iCount );
}
/// Export every trie mapping as an escaped line into the JSON builder.
void ExceptionsTrie_c::Export ( JsonEscapedBuilder & tOut ) const
{
	WriterTrie_fn fnAppend = [&tOut] ( const char* szLine ) { tOut.FixupSpacedAndAppendEscaped ( szLine ); };
	WriterTrie_t tDataWriter ( m_dData, m_iMappings, std::move ( fnAppend ) );
	tDataWriter.Write ( 0 );
	assert ( tDataWriter.m_iCount==m_iCount );
}
/// intermediate exceptions trie node
/// only used by ExceptionsTrieGen_c, while building a blob
class ExceptionsTrieNode_c
{
	friend class ExceptionsTrieGen_c;

	struct Entry_t
	{
		BYTE m_uValue;                 // incoming byte value this edge accepts
		ExceptionsTrieNode_c* m_pKid;  // child node reached via that byte (owned)
	};

	CSphString m_sTo;            ///< output mapping for current prefix, if any
	CSphVector<Entry_t> m_dKids; ///< known and accepted incoming byte values

public:
	~ExceptionsTrieNode_c()
	{
		for ( auto& dKid: m_dKids )
			SafeDelete ( dKid.m_pKid );
	}

	/// returns false on a duplicate "from" part, or true on success
	bool AddMapping ( const BYTE* sFrom, const BYTE* sTo )
	{
		// no more bytes to consume? this is our output mapping then
		if ( !*sFrom )
		{
			// a second mapping for the same "from" string is a duplicate
			if ( !m_sTo.IsEmpty() )
				return false;
			m_sTo = (const char*)sTo;
			return true;
		}

		// linear scan over kids; trie fan-out is small so this is acceptable
		int i;
		for ( i = 0; i < m_dKids.GetLength(); i++ )
			if ( m_dKids[i].m_uValue == *sFrom )
				break;

		// no edge for this byte yet? create it
		if ( i == m_dKids.GetLength() )
		{
			Entry_t& t = m_dKids.Add();
			t.m_uValue = *sFrom;
			t.m_pKid = new ExceptionsTrieNode_c();
		}
		return m_dKids[i].m_pKid->AddMapping ( sFrom + 1, sTo );
	}
};
/// exceptions trie builder
/// plain old text mappings in, nice useful trie out
/// Implementation of the exceptions trie builder: accumulates "from => to"
/// mappings into an in-memory node tree, then serializes it into a flat blob.
class ExceptionsTrieGen_c::Impl_c
{
	ExceptionsTrieNode_c* m_pRoot; // root of the in-progress node tree (owned)
	int m_iCount;                  // number of accepted mappings so far

public:
	Impl_c()
	{
		m_pRoot = new ExceptionsTrieNode_c();
		m_iCount = 0;
	}

	~Impl_c()
	{
		SafeDelete ( m_pRoot );
	}

	/// trims left/right whitespace, folds inner whitespace
	/// (backslashes act as escapes and are dropped)
	void FoldSpace ( char* s ) const
	{
		// skip leading spaces
		char* d = s;
		while ( *s && ( sphIsSpace ( *s ) || *s=='\\' ) )
			s++;

		// handle degenerate (empty string) case
		if ( !*s )
		{
			*d = '\0';
			return;
		}

		while ( *s )
		{
			// copy another token, add exactly 1 space after it, and skip whitespace
			while ( *s && !sphIsSpace ( *s ) )
			{
				if ( *s=='\\' )
					s++;
				else
					*d++ = *s++;
			}
			*d++ = ' ';
			while ( sphIsSpace ( *s ) || *s=='\\' )
				s++;
		}

		// replace that last space that we added
		d[-1] = '\0';
	}

	/// Parse one "from => to" exceptions line (modifies sBuffer in place).
	/// Returns false with sError set on any syntax problem or duplicate mapping.
	bool ParseLine ( char* sBuffer, CSphString& sError )
	{
#define LOC_ERR( _arg ) { sError = _arg; return false; }
		assert ( m_pRoot );

		// extract map-from and map-to parts
		char* sSplit = strstr ( sBuffer, "=>" );
		if ( !sSplit )
			LOC_ERR ( "mapping token (=>) not found" );

		char* sFrom = sBuffer;
		char* sTo = sSplit + 2; // skip "=>"
		*sSplit = '\0';

		// trim map-from, map-to
		FoldSpace ( sFrom );
		FoldSpace ( sTo );
		if ( !*sFrom )
			LOC_ERR ( "empty map-from part" );
		if ( !*sTo )
			LOC_ERR ( "empty map-to part" );
		if ( (int)strlen ( sFrom ) > MAX_KEYWORD_BYTES )
			LOC_ERR ( "map-from part too long" );
		// FIXED: this message used to wrongly say "map-from part too long"
		if ( (int)strlen ( sTo ) > MAX_KEYWORD_BYTES )
			LOC_ERR ( "map-to part too long" );

		// all parsed ok; add it!
		if ( m_pRoot->AddMapping ( (BYTE*)sFrom, (BYTE*)sTo ) )
			m_iCount++;
		else
			LOC_ERR ( "duplicate map-from part" );

		return true;
#undef LOC_ERR
	}

	/// Serialize the accumulated mappings into an ExceptionsTrie_c blob and reset
	/// the builder. Returns null when no mappings were added.
	ExceptionsTrie_c* Build()
	{
		if ( !m_pRoot || !m_pRoot->m_sTo.IsEmpty() || m_pRoot->m_dKids.GetLength() == 0 )
			return nullptr;

		auto* pRes = new ExceptionsTrie_c();
		pRes->m_iCount = m_iCount;

		// save the nodes themselves
		CSphVector<BYTE> dMappings;
		SaveNode ( pRes, m_pRoot, dMappings );

		// append and fixup output mappings
		CSphVector<BYTE>& d = pRes->m_dData;
		pRes->m_iMappings = d.GetLength();
		d.Append ( dMappings );

		BYTE* p = d.Begin();
		BYTE* pMax = p + pRes->m_iMappings;
		while ( p < pMax )
		{
			// fixup offset in the current node, if needed
			int* pOff = (int*)p; // FIXME? unaligned
			if ( ( *pOff ) < 0 )
				*pOff = 0; // convert -1 to 0 for non-outputs
			else
				( *pOff ) += pRes->m_iMappings; // fixup offsets for outputs

			// proceed to the next node
			int n = p[4];
			p += 5 + 5 * n;
		}
		assert ( p == pMax );

		// build the speedup table for the very 1st byte
		for (int & i : pRes->m_dFirst)
			i = -1;
		int n = d[4];
		for ( int i = 0; i < n; i++ )
			pRes->m_dFirst[d[5 + i]] = *(int*)&pRes->m_dData[5 + n + 4 * i];

		// reset the builder so it can be reused for another trie
		SafeDelete ( m_pRoot );
		m_pRoot = new ExceptionsTrieNode_c();
		m_iCount = 0;
		return pRes;
	}

private:
	/// Store a 32-bit int at byte offset p, honoring the target endianness.
	void SaveInt ( CSphVector<BYTE>& v, int p, int x )
	{
#if USE_LITTLE_ENDIAN
		v[p] = x & 0xff;
		v[p + 1] = ( x >> 8 ) & 0xff;
		v[p + 2] = ( x >> 16 ) & 0xff;
		v[p + 3] = ( x >> 24 ) & 0xff;
#else
		v[p] = ( x >> 24 ) & 0xff;
		v[p + 1] = ( x >> 16 ) & 0xff;
		v[p + 2] = ( x >> 8 ) & 0xff;
		v[p + 3] = x & 0xff;
#endif
	}

	/// Recursively serialize pNode (and its subtree) into the result blob;
	/// output strings are collected into dMappings. Returns the node's offset.
	int SaveNode ( ExceptionsTrie_c* pRes, ExceptionsTrieNode_c* pNode, CSphVector<BYTE>& dMappings )
	{
		CSphVector<BYTE>& d = pRes->m_dData; // shortcut

		// remember the start node offset
		int iRes = d.GetLength();
		int n = pNode->m_dKids.GetLength();
		assert ( !( pNode->m_sTo.IsEmpty() && n == 0 ) );

		// save offset into dMappings, or temporary (!) save -1 if there is no output mapping
		// note that we will fixup those -1's to 0's afterwards
		int iOff = -1;
		if ( !pNode->m_sTo.IsEmpty() )
		{
			iOff = dMappings.GetLength();
			int iLen = pNode->m_sTo.Length();
			memcpy ( dMappings.AddN ( iLen + 1 ), pNode->m_sTo.cstr(), iLen + 1 );
		}
		d.AddN ( 4 );
		SaveInt ( d, d.GetLength() - 4, iOff );

		// sort children nodes by value
		pNode->m_dKids.Sort ( bind ( &ExceptionsTrieNode_c::Entry_t::m_uValue ) );

		// save num_values, and values[]
		d.Add ( (BYTE)n );
		ARRAY_FOREACH ( i, pNode->m_dKids )
			d.Add ( pNode->m_dKids[i].m_uValue );

		// save offsets[], and the respective child nodes
		int p = d.GetLength();
		d.AddN ( 4 * n );
		for ( int i = 0; i < n; i++, p += 4 )
			SaveInt ( d, p, SaveNode ( pRes, pNode->m_dKids[i].m_pKid, dMappings ) );
		assert ( p == iRes + 5 + 5 * n );

		// done!
		return iRes;
	}
};
/// pimpl: all builder state lives in Impl_c
ExceptionsTrieGen_c::ExceptionsTrieGen_c()
{
	m_pImpl = new Impl_c;
}
ExceptionsTrieGen_c::~ExceptionsTrieGen_c()
{
	delete m_pImpl; // owned pimpl
}
/// Forwarder: trim/fold whitespace in place (see Impl_c::FoldSpace).
void ExceptionsTrieGen_c::FoldSpace ( char* s ) const
{
	m_pImpl->FoldSpace ( s );
}
/// Forwarder: parse one "from => to" exceptions line (see Impl_c::ParseLine).
bool ExceptionsTrieGen_c::ParseLine ( char* sBuffer, CSphString& sError )
{
	return m_pImpl->ParseLine ( sBuffer, sError );
}
/// Forwarder: serialize accumulated mappings into a trie blob and reset the builder.
ExceptionsTrie_c* ExceptionsTrieGen_c::Build()
{
	return m_pImpl->Build();
}
| 9,099
|
C++
|
.cpp
| 316
| 26.123418
| 130
| 0.625201
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,968
|
tokenizer_bigram.cpp
|
manticoresoftware_manticoresearch/src/tokenizer/tokenizer_bigram.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "token_filter.h"
#include "sphinxstd.h"
#include "sphinxdefs.h"
#include "sphinxint.h"
/// token filter for bigram indexing
///
/// passes tokens through until an eligible pair is found
/// then buffers and returns that pair as a blended token
/// then returns the first token as a regular one
/// then pops the first one and cycles again
///
/// pair (aka bigram) eligibility depends on bigram_index value
/// "all" means that all token pairs gets indexed
/// "first_freq" means that 1st token must be from bigram_freq_words
/// "both_freq" means that both tokens must be from bigram_freq_words
class BigramTokenizer: public CSphTokenFilter
{
protected:
	enum {
		BIGRAM_CLEAN,	///< clean slate, nothing accumulated
		BIGRAM_PAIR,	///< just returned a pair from m_sBuf, and m_iFirst/m_pSecond are correct
		BIGRAM_FIRST	///< just returned a first token from m_sBuf, so m_iFirst/m_pSecond are still good
	} m_eState = BIGRAM_CLEAN;

	BYTE m_sBuf[MAX_KEYWORD_BYTES];	///< pair buffer
	BYTE* m_pSecond = nullptr;		///< second token pointer
	int m_iFirst = 0;				///< first token length, bytes
	CSphRefcountedPtr<const BigramTokenizer> m_pFather;	///< used by clones to share state

	// unchanged state (no need to copy on clone)
	ESphBigram m_eMode;				///< bigram indexing mode
	BYTE m_uMaxLen = 0;				///< max bigram_freq_words length
	CSphFixedVector<int> m_dWordsHash { 0 };	///< offsets into m_dWords hashed by 1st byte
	CSphVector<BYTE> m_dWords;		///< case-folded, sorted bigram_freq_words

public:
	/// wraps pTok; dWords is bigram_freq_words and gets deduplicated in-place
	BigramTokenizer ( TokenizerRefPtr_c pTok, ESphBigram eMode, StrVec_t& dWords )
		: CSphTokenFilter ( std::move (pTok) )
	{
		assert ( eMode != SPH_BIGRAM_NONE );
		assert ( eMode == SPH_BIGRAM_ALL || dWords.GetLength() );

		m_sBuf[0] = 0;
		m_dWordsHash.Reset ( 256 );
		m_dWordsHash.ZeroVec();
		m_eMode = eMode;

		// only keep unique, real, short enough words
		dWords.Uniq();
		for ( const auto& sWord : dWords )
		{
			// word lengths are stored in one byte, hence the 255 cap
			int iLen = Min ( sWord.Length(), (int)255 );
			if ( !iLen )
				continue;

			m_uMaxLen = Max ( m_uMaxLen, (BYTE)iLen );

			// hash word blocks by the first letter
			BYTE uFirst = *(BYTE*)const_cast<char*> ( sWord.cstr() );
			if ( !m_dWordsHash[uFirst] )
			{
				m_dWords.Add ( 0 ); // end marker for the previous block
				m_dWordsHash[uFirst] = m_dWords.GetLength(); // hash new block
			}

			// store that word (as a length byte followed by the raw bytes)
			m_dWords.ReserveGap ( iLen + 1 );
			m_dWords.Add ( BYTE ( iLen ) );
			m_dWords.Append ( sWord.cstr(), iLen );
		}
		m_dWords.Add ( 0 );
	}

	/// clone ctor; the (immutable) word list is shared through m_pFather
	BigramTokenizer ( TokenizerRefPtr_c pTok, const BigramTokenizer* pBase )
		: CSphTokenFilter ( std::move (pTok) )
	{
		m_sBuf[0] = 0;
		m_eMode = pBase->m_eMode;
		m_uMaxLen = pBase->m_uMaxLen;
		// always point m_pFather at the original owner, never at a clone chain
		if ( pBase->m_pFather )
			m_pFather = pBase->m_pFather;
		else
		{
			pBase->AddRef();
			m_pFather = pBase;
		}
	}

	TokenizerRefPtr_c Clone ( ESphTokenizerClone eMode ) const noexcept final
	{
		return TokenizerRefPtr_c { new BigramTokenizer ( m_pTokenizer->Clone ( eMode ), this ) };
	}

	/// a just-returned pair is blended; its first half, returned next, is not
	bool TokenIsBlended() const noexcept final
	{
		if ( m_eState == BIGRAM_PAIR )
			return true;
		if ( m_eState == BIGRAM_FIRST )
			return false;
		return m_pTokenizer->TokenIsBlended();
	}

	/// check whether a word is listed in bigram_freq_words
	bool IsFreq ( int iLen, BYTE* sWord ) const
	{
		// early check
		if ( iLen > m_uMaxLen )
			return false;

		// clones delegate to the owner of the word list
		if ( m_pFather )
			return m_pFather->IsFreq ( iLen, sWord );

		// hash lookup, then linear scan
		int iPos = m_dWordsHash[*sWord];
		if ( !iPos )
			return false;
		while ( m_dWords[iPos] )
		{
			if ( m_dWords[iPos] == iLen && !memcmp ( sWord, &m_dWords[iPos + 1], iLen ) )
				break;
			iPos += 1 + m_dWords[iPos];
		}
		// non-zero length byte here means we broke out on a match
		return m_dWords[iPos] != 0;
	}

	BYTE* GetToken() final
	{
		if ( m_eState == BIGRAM_FIRST || m_eState == BIGRAM_CLEAN )
		{
			BYTE* pFirst;
			if ( m_eState == BIGRAM_FIRST )
			{
				// first out, clean slate again, actually
				// and second will now become our next first
				assert ( m_pSecond );
				m_eState = BIGRAM_CLEAN;
				pFirst = m_pSecond;
				m_pSecond = nullptr;
			} else
			{
				// just clean slate
				// assure we're, well, clean
				assert ( !m_pSecond );
				pFirst = CSphTokenFilter::GetToken();
			}

			// clean slate
			// get first non-blended token
			if ( !pFirst )
				return nullptr;

			// pass through blended
			// could handle them as first too, but.. cumbersome
			if ( CSphTokenFilter::TokenIsBlended() )
				return pFirst;

			// check pair
			// in first_freq and both_freq modes, 1st token must be listed
			m_iFirst = (int)strlen ( (const char*)pFirst );
			if ( m_eMode != SPH_BIGRAM_ALL && !IsFreq ( m_iFirst, pFirst ) )
				return pFirst;

			// copy it
			// subsequent calls can and will override token accumulator
			memcpy ( m_sBuf, pFirst, m_iFirst + 1 );

			// grow a pair!
			// get a second one (lookahead, in a sense)
			BYTE* pSecond = CSphTokenFilter::GetToken();

			// eof? oi
			if ( !pSecond )
				return m_sBuf;

			// got a pair!
			// check combined length
			m_pSecond = pSecond;
			auto iSecond = (int)strlen ( (const char*)pSecond );
			if ( m_iFirst + iSecond + 1 > SPH_MAX_WORD_LEN )
			{
				// too long pair
				// return first token as is
				m_eState = BIGRAM_FIRST;
				return m_sBuf;
			}

			// check pair
			// in freq2 mode, both tokens must be listed
			if ( m_eMode == SPH_BIGRAM_BOTHFREQ && !IsFreq ( iSecond, m_pSecond ) )
			{
				m_eState = BIGRAM_FIRST;
				return m_sBuf;
			}

			// ok, this is a eligible pair
			// begin with returning first+second pair (as blended)
			m_eState = BIGRAM_PAIR;
			m_sBuf[m_iFirst] = MAGIC_WORD_BIGRAM;
			assert ( m_iFirst + strlen ( (const char*)pSecond ) < sizeof ( m_sBuf ) );
			strcpy ( (char*)m_sBuf + m_iFirst + 1, (const char*)pSecond ); // NOLINT
			return m_sBuf;

		} else if ( m_eState == BIGRAM_PAIR )
		{
			// pair (aka bigram) out, return first token as a regular token
			m_eState = BIGRAM_FIRST;
			m_sBuf[m_iFirst] = 0;
			return m_sBuf;
		}

		assert ( 0 && "unhandled bigram tokenizer internal state" );
		return nullptr;
	}

	uint64_t GetSettingsFNV() const noexcept final
	{
		// clones hash through the owner, so the whole family shares one hash
		if ( m_pFather )
			return m_pFather->GetSettingsFNV();

		uint64_t uHash = CSphTokenFilter::GetSettingsFNV();
		uHash = sphFNV64 ( m_dWords.Begin(), m_dWords.GetLength(), uHash );
		return uHash;
	}
};
/// wrap pTokenizer (in-place) into a bigram filter, when bigram indexing is enabled
void Tokenizer::AddBigramFilterTo ( TokenizerRefPtr_c& pTokenizer, ESphBigram eBigramIndex, const CSphString& sBigramWords, CSphString& sError )
{
	// nothing to wrap, or bigram indexing is off
	if ( !pTokenizer || eBigramIndex == SPH_BIGRAM_NONE )
		return;

	// frequency-based modes need the configured word list tokenized first
	StrVec_t dFreqWords;
	if ( eBigramIndex != SPH_BIGRAM_ALL )
	{
		pTokenizer->SetBuffer ( (const BYTE*)sBigramWords.cstr(), sBigramWords.Length() );
		for ( const BYTE* pTok = pTokenizer->GetToken(); pTok; pTok = pTokenizer->GetToken() )
			dFreqWords.Add ( (const char*)pTok );

		if ( dFreqWords.IsEmpty() )
		{
			sError.SetSprintf ( "bigram_freq_words does not contain any valid words" );
			return;
		}
	}

	pTokenizer = new BigramTokenizer ( std::move ( pTokenizer ), eBigramIndex, dFreqWords );
}
| 7,440
|
C++
|
.cpp
| 225
| 29.84
| 144
| 0.671263
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,969
|
tokenizerbase.cpp
|
manticoresoftware_manticoresearch/src/tokenizer/tokenizerbase.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "tokenizerbase_impl.h"
#include "lowercaser_impl.h"
#include "sphinxdefs.h"
#include "sphinxint.h"
#include "exceptions_trie.h"
#include "fileio.h"
// ctor: point the accumulator write cursor at the embedded accumulator buffer
CSphTokenizerBase::CSphTokenizerBase()
{
	m_pAccum = m_sAccum;
}
// dtor: the exceptions trie is owned by this tokenizer
CSphTokenizerBase::~CSphTokenizerBase()
{
	SafeDelete ( m_pExc );
}
bool CSphTokenizerBase::SetCaseFolding ( const char* sConfig, CSphString& sError )
{
	// the synonyms (exceptions) trie is built on top of the current case folding,
	// so changing the folding after LoadSynonyms() would silently invalidate it
	if ( !m_pExc )
	{
		m_bHasBlend = false;
		return ISphTokenizer::SetCaseFolding ( sConfig, sError );
	}

	sError = "SetCaseFolding() must not be called after LoadSynonyms()";
	return false;
}
bool CSphTokenizerBase::SetBlendChars ( const char* sConfig, CSphString& sError )
{
	// remember whether any blended chars got configured; tokenizing checks this flag
	return ( m_bHasBlend = ISphTokenizer::SetBlendChars ( sConfig, sError ) );
}
/// build the exceptions (synonyms) trie, either from data embedded into the index
/// (pFiles) or from a plain text file (sFilename)
/// per-line parse failures become warnings, not hard errors; only a failed file
/// open is fatal
bool CSphTokenizerBase::LoadSynonyms ( const char* sFilename, const CSphEmbeddedFiles* pFiles, StrVec_t& dWarnings, CSphString& sError )
{
	CSphString sWarning;
	ExceptionsTrieGen_c g;
	if ( pFiles )
	{
		// take the list embedded into the index
		m_tSynFileInfo = pFiles->m_tSynonymFile;
		ARRAY_FOREACH ( i, pFiles->m_dSynonyms )
		{
			// NOTE(review): 'i' here is 0-based while the file branch below reports
			// 1-based line numbers — confirm whether that is intended
			if ( !g.ParseLine ( const_cast<char*> ( pFiles->m_dSynonyms[i].cstr() ), sWarning ) )
			{
				sWarning.SetSprintf ( "%s line %d: %s", pFiles->m_tSynonymFile.m_sFilename.cstr(), i, sWarning.cstr() );
				dWarnings.Add ( sWarning );
				sphWarning ( "%s", sWarning.cstr() );
			}
		}
	} else
	{
		// no filename configured means no synonyms; that is not an error
		if ( !sFilename || !*sFilename )
			return true;

		m_tSynFileInfo.Collect ( sFilename );

		CSphAutoreader tReader;
		if ( !tReader.Open ( sFilename, sError ) )
			return false;

		char sBuffer[1024];
		int iLine = 0;
		while ( tReader.GetLine ( sBuffer, sizeof ( sBuffer ) ) >= 0 )
		{
			iLine++;
			if ( !g.ParseLine ( sBuffer, sWarning ) )
			{
				sWarning.SetSprintf ( "%s line %d: %s", sFilename, iLine, sWarning.cstr() );
				dWarnings.Add ( sWarning );
				sphWarning ( "%s", sWarning.cstr() );
			}
		}
	}
	m_pExc = g.Build();
	return true;
}
void CSphTokenizerBase::WriteSynonyms ( Writer_i & tWriter ) const
{
	// no trie -> write a zero dword so readers see an explicitly empty section
	if ( !m_pExc )
	{
		tWriter.PutDword ( 0 );
		return;
	}
	m_pExc->Export ( tWriter );
}
// dump the exceptions trie into a JSON "synonyms" array; omitted entirely when empty
void CSphTokenizerBase::WriteSynonyms ( JsonEscapedBuilder & tOut ) const
{
	if ( !m_pExc )
		return;
	tOut.Named ( "synonyms" );
	auto _ = tOut.ArrayW(); // RAII scope closes the JSON array
	m_pExc->Export ( tOut );
}
/// copy shared state from pFrom into this clone and pick the lowercaser variant
/// matching the requested clone mode (index vs the various query flavors)
void CSphTokenizerBase::CloneBase ( const CSphTokenizerBase* pFrom, ESphTokenizerClone eMode )
{
	m_eMode = eMode;
	// deep-copy the exceptions trie, if the parent has one
	m_pExc = nullptr;
	if ( pFrom->m_pExc )
	{
		m_pExc = new ExceptionsTrie_c;
		*m_pExc = *pFrom->m_pExc;
	}
	m_tSettings = pFrom->m_tSettings;
	m_bHasBlend = pFrom->m_bHasBlend;
	m_uBlendVariants = pFrom->m_uBlendVariants;
	m_bBlendSkipPure = pFrom->m_bBlendSkipPure;
	m_bShortTokenFilter = ( m_eMode != SPH_CLONE_INDEX );
	m_bDetectSentences = pFrom->m_bDetectSentences;

	// By default, we operate with read-only refcounted pointer to prepared lowercaser.
	// Any changing operation uses special write-enabled pointer, which is null by default, and also forcibly reset to null if we clone the pointer, in order to protect clone from changes of parent.
	// So, 'just clone' for querying is ok. Clone and add some additional symbols = full clone.
	if ( eMode == pFrom->m_eMode || eMode == SPH_CLONE )
	{
		SetLC ( pFrom->GetLC() );
		m_eMode = pFrom->m_eMode;
		return;
	}

	// assume clones are not compatible between each other. So, clone any kind of query possibly either from the same, either from index, but not from another kind of query.
	assert ( pFrom->m_eMode == SPH_CLONE_INDEX );
	if ( eMode != SPH_CLONE_INDEX )
		m_uBlendVariants = BLEND_TRIM_NONE;

	// each query clone mode maps to its own precomputed lowercaser table
	switch ( eMode )
	{
	case SPH_CLONE_QUERY_WILD_EXACT_JSON:
		SetLC ( pFrom->GetLC()->GetQueryWildExactJsonLC() );
		break;
	case SPH_CLONE_QUERY_WILD_EXACT:
		SetLC ( pFrom->GetLC()->GetQueryWildExactLC() );
		break;
	case SPH_CLONE_QUERY_WILD_JSON:
		SetLC ( pFrom->GetLC()->GetQueryWildJsonLC() );
		break;
	case SPH_CLONE_QUERY_WILD:
		SetLC ( pFrom->GetLC()->GetQueryWildLC() );
		break;
	case SPH_CLONE_QUERY_EXACT_JSON:
		SetLC ( pFrom->GetLC()->GetQueryExactJsonLC() );
		break;
	case SPH_CLONE_QUERY_EXACT:
		SetLC ( pFrom->GetLC()->GetQueryExactLC() );
		break;
	case SPH_CLONE_QUERY_:
		SetLC ( pFrom->GetLC()->GetQuery_LC() );
		break;
	case SPH_CLONE_QUERY:
		SetLC ( pFrom->GetLC()->GetQueryLC() );
		break;
	case SPH_CLONE_INDEX:
	default:
		SetLC ( pFrom->GetLC() );
	}
}
uint64_t CSphTokenizerBase::GetSettingsFNV() const noexcept
{
	// fold the blend-chars flag into the base settings hash
	DWORD uFlags = m_bHasBlend ? 1 : 0;
	return sphFNV64 ( &uFlags, sizeof ( uFlags ), ISphTokenizer::GetSettingsFNV() );
}
/// reposition the parse cursor inside the current buffer and reset all
/// accumulator/token/blend state, as if tokenizing restarted from sNewPtr
void CSphTokenizerBase::SetBufferPtr ( const char* sNewPtr )
{
	assert ( (const BYTE*)sNewPtr >= m_pBuffer && (const BYTE*)sNewPtr <= m_pBufferMax );
	// clamp defensively even though the assert above should already hold
	m_pCur = Min ( m_pBufferMax, Max ( m_pBuffer, (const BYTE*)sNewPtr ) );
	m_iAccum = 0;
	m_pAccum = m_sAccum;
	m_pTokenStart = m_pTokenEnd = nullptr;
	m_pBlendStart = m_pBlendEnd = nullptr;
}
/// adjusts blending magic when we're about to return a token (any token)
/// returns false if current token should be skipped, true otherwise
bool CSphTokenizerBase::BlendAdjust ( const BYTE* pCur )
{
	// check if all we got is a bunch of blended characters (pure-blended case)
	if ( m_bBlended && !m_bNonBlended )
	{
		// we either skip this token, or pretend it was normal
		// in both cases, clear the flag
		m_bBlended = false;

		// do we need to skip it?
		if ( m_bBlendSkipPure )
		{
			m_pBlendStart = NULL;
			return false;
		}
	}
	m_bNonBlended = false;

	// adjust buffer pointers
	if ( m_bBlended && m_pBlendStart )
	{
		// called once per blended token, on processing start
		// at this point, full blended token is in the accumulator
		// and we're about to return it
		m_pCur = m_pBlendStart;
		m_pBlendEnd = pCur;
		m_pBlendStart = nullptr;
		m_bBlendedPart = true;
	} else if ( pCur >= m_pBlendEnd )
	{
		// tricky bit, as at this point, token we're about to return
		// can either be a blended subtoken, or the next one
		m_bBlendedPart = m_pTokenStart && ( m_pTokenStart < m_pBlendEnd );
		m_pBlendEnd = nullptr;
		m_pBlendStart = nullptr;
	} else if ( !m_pBlendEnd )
	{
		// we aren't re-parsing blended; so clear the "blended subtoken" flag
		m_bBlendedPart = false;
	}
	return true;
}
//////////////////////////////////////////////////////////////////////////
// true for ASCII uppercase letters only
static inline bool IsCapital ( int iCh )
{
	return 'A' <= iCh && iCh <= 'Z';
}
// true for NUL and the usual ASCII whitespace characters
static inline bool IsWhitespace ( BYTE c )
{
	switch ( c )
	{
		case '\0': case ' ': case '\t': case '\r': case '\n':
			return true;
		default:
			return false;
	}
}
static inline bool IsBoundary ( BYTE c, bool bPhrase )
{
	// FIXME? sorta intersects with specials
	// then again, a shortened-down list (more strict syntax) is reasonble here too
	if ( IsWhitespace ( c ) || c == '"' )
		return true;
	if ( bPhrase )
		return false;
	// outside phrases, query grouping operators also terminate a token
	return c == '(' || c == ')' || c == '|';
}
/// indexing-time codepoint arbitration: when sentence detection is enabled,
/// decide whether '.', '?', '!' end a sentence (returning the MAGIC_CODE_SENTENCE
/// special) or are just ordinary characters
int CSphTokenizerBase::CodepointArbitrationI ( int iCode )
{
	if ( !m_bDetectSentences )
		return iCode;

	// detect sentence boundaries
	// FIXME! should use charset_table (or add a new directive) and support languages other than English
	int iSymbol = iCode & MASK_CODEPOINT;
	if ( iSymbol == '?' || iSymbol == '!' )
	{
		// definitely a sentence boundary
		return MAGIC_CODE_SENTENCE | FLAG_CODEPOINT_SPECIAL;
	}

	if ( iSymbol == '.' )
	{
		// inline dot ("in the U.K and"), not a boundary
		bool bInwordDot = ( sphIsAlpha ( m_pCur[0] ) || ( m_pCur[0] & 0x80 ) == 0x80 // IsAlpha to consider UTF8 chars
							|| m_pCur[0] == ',' );

		// followed by a small letter or an opening paren, not a boundary
		// FIXME? might want to scan for more than one space
		// Yoyodine Inc. exists ...
		// Yoyodine Inc. (the company) ..
		bool bInphraseDot = ( sphIsSpace ( m_pCur[0] )
							  && ( ( 'a' <= m_pCur[1] && m_pCur[1] <= 'z' )
								   || ( m_pCur[1] == '(' && 'a' <= m_pCur[2] && m_pCur[2] <= 'z' ) ) );

		// preceded by something that looks like a middle name, opening first name, salutation
		bool bMiddleName = false;
		switch ( m_iAccum )
		{
		case 1:
			// 1-char capital letter
			// example: J. R. R. Tolkien, who wrote Hobbit ...
			// example: John D. Doe ...
			bMiddleName = IsCapital ( m_pCur[-2] );
			break;
		case 2:
			// 2-char token starting with a capital
			if ( IsCapital ( m_pCur[-3] ) )
			{
				// capital+small
				// example: Known as Mr. Doe ...
				if ( !IsCapital ( m_pCur[-2] ) )
					bMiddleName = true;

				// known capital+capital (MR, DR, MS)
				if (
					( m_pCur[-3] == 'M' && m_pCur[-2] == 'R' ) || ( m_pCur[-3] == 'M' && m_pCur[-2] == 'S' ) || ( m_pCur[-3] == 'D' && m_pCur[-2] == 'R' ) )
					bMiddleName = true;
			}
			break;
		case 3:
			// preceded by a known 3-byte token (MRS, DRS)
			// example: Survived by Mrs. Doe ...
			if ( ( m_sAccum[0] == 'm' || m_sAccum[0] == 'd' ) && m_sAccum[1] == 'r' && m_sAccum[2] == 's' )
				bMiddleName = true;
			break;
		}

		if ( !bInwordDot && !bInphraseDot && !bMiddleName )
		{
			// sentence boundary
			return MAGIC_CODE_SENTENCE | FLAG_CODEPOINT_SPECIAL;
		} else
		{
			// just a character
			if ( ( iCode & MASK_FLAGS ) == FLAG_CODEPOINT_SPECIAL )
				return 0; // special only, not dual? then in this context, it is a separator
			else
				return iCode & ~( FLAG_CODEPOINT_SPECIAL | FLAG_CODEPOINT_DUAL ); // perhaps it was blended, so return the original code
		}
	}

	// pass-through
	return iCode;
}
/// query-time codepoint arbitration: resolve blended/special/dual flag conflicts
/// for a codepoint, taking escaping, phrase context, and accumulator state into account
int CSphTokenizerBase::CodepointArbitrationQ ( int iCode, bool bWasEscaped, BYTE uNextByte )
{
	if ( iCode & FLAG_CODEPOINT_NGRAM )
		return iCode; // ngrams are handled elsewhere

	int iSymbol = iCode & MASK_CODEPOINT;

	// codepoints can't be blended and special at the same time
	if ( ( iCode & FLAG_CODEPOINT_BLEND ) && ( iCode & FLAG_CODEPOINT_SPECIAL ) )
	{
		bool bBlend =
			bWasEscaped || // escaped characters should always act as blended
			( m_bPhrase && !sphIsModifier ( iSymbol ) && iSymbol != '"' ) || // non-modifier special inside phrase
			( m_iAccum && ( iSymbol == '@' || iSymbol == '/' || iSymbol == '-' ) ); // some specials in the middle of a token

		// clear special or blend flags
		iCode &= bBlend
					 ? ~( FLAG_CODEPOINT_DUAL | FLAG_CODEPOINT_SPECIAL )
					 : ~( FLAG_CODEPOINT_DUAL | FLAG_CODEPOINT_BLEND );
	}

	// escaped specials are not special
	// dash and dollar inside the word are not special (however, single opening modifier is not a word!)
	// non-modifier specials within phrase are not special
	bool bDashInside = ( m_iAccum && iSymbol == '-' && !( m_iAccum == 1 && sphIsModifier ( m_sAccum[0] ) ) );
	if ( iCode & FLAG_CODEPOINT_SPECIAL )
		if ( bWasEscaped
			 || bDashInside
			 || ( m_iAccum && iSymbol == '$' && !IsBoundary ( uNextByte, m_bPhrase ) )
			 || ( m_bPhrase && iSymbol != '"' && !sphIsModifier ( iSymbol ) ) )
		{
			if ( iCode & FLAG_CODEPOINT_DUAL )
				iCode &= ~( FLAG_CODEPOINT_SPECIAL | FLAG_CODEPOINT_DUAL );
			else
				iCode = 0;
		}

	// if we didn't remove special by now, it must win
	if ( iCode & FLAG_CODEPOINT_DUAL )
	{
		assert ( iCode & FLAG_CODEPOINT_SPECIAL );
		iCode = iSymbol | FLAG_CODEPOINT_SPECIAL;
	}

	// ideally, all conflicts must be resolved here
	// well, at least most
	assert ( sphBitCount ( iCode & MASK_FLAGS ) <= 1 );
	return iCode;
}
| 11,599
|
C++
|
.cpp
| 343
| 31.058309
| 195
| 0.664941
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,970
|
source_pgsql.cpp
|
manticoresoftware_manticoresearch/src/indexing_sources/source_pgsql.cpp
|
//
// Copyright (c) 2021-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "source_pgsql.h"
#include <libpq-fe.h>
#include <config_indexer.h>
/////////////////////////////////////////////////////////////////////////////
// PGSQL SOURCE
/////////////////////////////////////////////////////////////////////////////
#ifndef POSTGRESQL_LIB
#define POSTGRESQL_LIB nullptr
#endif
// the POSTGRESQL_LIB env var (if set) overrides the build-time library path
static const char * GET_POSTGRESQL_LIB ()
{
	const char * szEnv = getenv ( "POSTGRESQL_LIB" );
	return szEnv ? szEnv : POSTGRESQL_LIB;
}
#if DL_POSTGRESQL
static decltype (&PQgetvalue) sph_PQgetvalue = nullptr;
static decltype (&PQgetlength) sph_PQgetlength = nullptr;
static decltype (&PQclear) sph_PQclear = nullptr;
static decltype (&PQsetdbLogin) sph_PQsetdbLogin = nullptr;
static decltype (&PQstatus) sph_PQstatus = nullptr;
static decltype (&PQsetClientEncoding) sph_PQsetClientEncoding = nullptr;
static decltype (&PQexec) sph_PQexec = nullptr;
static decltype (&PQresultStatus) sph_PQresultStatus = nullptr;
static decltype (&PQntuples) sph_PQntuples = nullptr;
static decltype (&PQfname) sph_PQfname = nullptr;
static decltype (&PQnfields) sph_PQnfields = nullptr;
static decltype (&PQfinish) sph_PQfinish = nullptr;
static decltype (&PQerrorMessage) sph_PQerrorMessage = nullptr;
static decltype (&PQunescapeBytea) sph_PQunescapeBytea = nullptr;
static decltype (&PQfreemem) sph_PQfreemem = nullptr;
// resolve all required libpq entry points via dlopen; returns false if the
// library or any symbol is missing (the sph_PQ* pointers then stay null)
static bool InitDynamicPosgresql ()
{
	// names and destinations must stay index-aligned
	const char * sFuncs[] = {"PQgetvalue", "PQgetlength", "PQclear",
			"PQsetdbLogin", "PQstatus", "PQsetClientEncoding", "PQexec",
			"PQresultStatus", "PQntuples", "PQfname", "PQnfields",
			"PQfinish", "PQerrorMessage", "PQunescapeBytea", "PQfreemem" };
	void ** pFuncs[] = {(void**)&sph_PQgetvalue, (void**)&sph_PQgetlength, (void**)&sph_PQclear,
			(void**)&sph_PQsetdbLogin, (void**)&sph_PQstatus, (void**)&sph_PQsetClientEncoding,
			(void**)&sph_PQexec, (void**)&sph_PQresultStatus, (void**)&sph_PQntuples,
			(void**)&sph_PQfname, (void**)&sph_PQnfields, (void**)&sph_PQfinish,
			(void**)&sph_PQerrorMessage, (void**)&sph_PQunescapeBytea, (void**)&sph_PQfreemem};

	// static: the library is loaded once per process and kept open
	static CSphDynamicLibrary dLib ( GET_POSTGRESQL_LIB() );
	return dLib.LoadSymbols ( sFuncs, pFuncs, sizeof ( pFuncs ) / sizeof ( void ** ) );
}
#else
// statically linked client: alias the sph_PQ* wrappers straight to the libpq symbols.
// NOTE: these are object-like macros and must NOT carry trailing semicolons —
// a stray ';' makes e.g. sph_PQunescapeBytea(...) expand to 'PQunescapeBytea; (...)'
// and breaks the build (the original defines for PQunescapeBytea/PQfreemem had one).
#define sph_PQgetvalue PQgetvalue
#define sph_PQgetlength PQgetlength
#define sph_PQclear PQclear
#define sph_PQsetdbLogin PQsetdbLogin
#define sph_PQstatus PQstatus
#define sph_PQsetClientEncoding PQsetClientEncoding
#define sph_PQexec PQexec
#define sph_PQresultStatus PQresultStatus
#define sph_PQntuples PQntuples
#define sph_PQfname PQfname
#define sph_PQnfields PQnfields
#define sph_PQfinish PQfinish
#define sph_PQerrorMessage PQerrorMessage
#define sph_PQunescapeBytea PQunescapeBytea
#define sph_PQfreemem PQfreemem
#define InitDynamicPosgresql() (true)
#endif
/// PgSQL source implementation
/// multi-field plain-text documents fetched from given query
struct CSphSource_PgSQL : CSphSource_SQL
{
	explicit CSphSource_PgSQL ( const char * sName );
	bool SetupPgSQL ( const CSphSourceParams_PgSQL & tParams );
	bool IterateStart ( CSphString & sError ) final;

protected:
	PGresult * m_pPgResult = nullptr;	///< postgresql execution result context
	PGconn * m_tPgDriver = nullptr;		///< postgresql connection context

	int m_iPgRows = 0;					///< how much rows last step returned
	int m_iPgRow = 0;					///< current row (0 based, as in PQgetvalue)

	CSphString m_sPgClientEncoding;		///< client_encoding to set right after connect (may be empty)
	CSphVector<bool> m_dIsColumnBool;	///< per result-column flag: is it a BOOL attr? (SqlColumn maps 't' to "1")

protected:
	// CSphSource_SQL driver interface, implemented on top of libpq
	void SqlDismissResult () final;
	bool SqlQuery ( const char * sQuery ) final;
	bool SqlIsError () final;
	const char * SqlError () final;
	bool SqlConnect () final;
	void SqlDisconnect () final;
	int SqlNumFields() final;
	bool SqlFetchRow() final;
	DWORD SqlColumnLength ( int iIndex ) final;
	const char * SqlColumn ( int iIndex ) final;
	const char * SqlFieldName ( int iIndex ) final;
	Str_t SqlCompressedColumnStream ( int iFieldIndex ) final;
	void SqlCompressedColumnReleaseStream ( Str_t tStream ) final;
};
// defaults: postgres standard port, and ranged queries stepping 1024 ids at a time
CSphSourceParams_PgSQL::CSphSourceParams_PgSQL ()
{
	m_iRangeStep = 1024;
	m_uPort = 5432;
}
CSphSource_PgSQL::CSphSource_PgSQL ( const char * sName )
	: CSphSource_SQL ( sName )
{
	// bytea columns can be unpacked via PQunescapeBytea (see SqlCompressedColumnStream)
	m_bCanUnpack = true;
}
bool CSphSource_PgSQL::SqlIsError ()
{
	// NOTE(review): this reports "error" whenever the cursor has not consumed all
	// buffered rows (libpq fetches the whole result set up front), and never consults
	// PQerrorMessage — confirm this matches the caller's expectation
	return ( m_iPgRow<m_iPgRows ); // if we're over, it's just last row
}
const char * CSphSource_PgSQL::SqlError ()
{
	// the function pointer stays null when dlopen failed to resolve libpq
	if ( !sph_PQerrorMessage )
		return "PgSQL source wasn't initialized. Wrong name in dlopen?";
	return sph_PQerrorMessage ( m_tPgDriver );
}
/// store pgsql-specific params and rewrite the DSN prefix ("sql..." -> "pgsql...")
/// for error reporting
bool CSphSource_PgSQL::SetupPgSQL ( const CSphSourceParams_PgSQL & tParams )
{
	// checks
	CSphSource_SQL::SetupSQL ( tParams );

	m_sPgClientEncoding = tParams.m_sClientEncoding;
	if ( !m_sPgClientEncoding.cstr() )
		m_sPgClientEncoding = "";

	// build and store DSN for error reporting
	char sBuf [ 1024 ];
	// +3 skips the generic "sql" prefix produced by the base class
	snprintf ( sBuf, sizeof(sBuf), "pgsql%s", m_sSqlDSN.cstr()+3 );
	m_sSqlDSN = sBuf;

	return true;
}
bool CSphSource_PgSQL::IterateStart ( CSphString & sError )
{
	// let the generic SQL source do its setup first
	if ( !CSphSource_SQL::IterateStart ( sError ) )
		return false;

	// find the highest result-set column index referenced by the schema
	int iMaxIndex = 0;
	for ( int i = 0; i < m_tSchema.GetAttrsCount(); i++ )
		iMaxIndex = Max ( iMaxIndex, m_tSchema.GetAttr(i).m_iIndex );

	for ( int i = 0; i < m_tSchema.GetFieldsCount(); i++ )
		iMaxIndex = Max ( iMaxIndex, m_tSchema.GetField(i).m_iIndex );

	// mark which result columns carry BOOL attrs, so SqlColumn() can map 't' to "1"
	m_dIsColumnBool.Resize ( iMaxIndex + 1 );
	for ( int i = 0; i < m_dIsColumnBool.GetLength(); i++ )
		m_dIsColumnBool[i] = false;

	for ( int i = 0; i < m_tSchema.GetAttrsCount(); i++ )
	{
		int iColumn = m_tSchema.GetAttr(i).m_iIndex;
		if ( iColumn >= 0 )
			m_dIsColumnBool [ iColumn ] = ( m_tSchema.GetAttr(i).m_eAttrType==SPH_ATTR_BOOL );
	}

	return true;
}
/// load libpq (if dynamic), open the connection, and apply the client encoding
/// progress is traced to stdout when print_queries is on
bool CSphSource_PgSQL::SqlConnect ()
{
	if ( !InitDynamicPosgresql() )
	{
		if ( m_tParams.m_bPrintQueries )
			fprintf ( stdout, "SQL-CONNECT: FAIL (NO POSGRES CLIENT LIB, tried %s)\n", GET_POSTGRESQL_LIB() );
		return false;
	}

	// PQsetdbLogin wants the port as a string
	char sPort[64];
	snprintf ( sPort, sizeof(sPort), "%d", m_tParams.m_uPort );
	m_tPgDriver = sph_PQsetdbLogin ( m_tParams.m_sHost.cstr(), sPort, NULL, NULL,
		m_tParams.m_sDB.cstr(), m_tParams.m_sUser.cstr(), m_tParams.m_sPass.cstr() );

	if ( sph_PQstatus ( m_tPgDriver )==CONNECTION_BAD )
	{
		if ( m_tParams.m_bPrintQueries )
			fprintf ( stdout, "SQL-CONNECT: FAIL\n" );
		return false;
	}

	// set client encoding
	if ( !m_sPgClientEncoding.IsEmpty() )
		if ( -1==sph_PQsetClientEncoding ( m_tPgDriver, m_sPgClientEncoding.cstr() ) )
	{
		// encoding failure is fatal: tear the connection down again
		SqlDisconnect ();

		if ( m_tParams.m_bPrintQueries )
			fprintf ( stdout, "SQL-CONNECT: FAIL\n" );
		return false;
	}

	if ( m_tParams.m_bPrintQueries )
		fprintf ( stdout, "SQL-CONNECT: ok\n" );
	return true;
}
// close the connection; PQfinish also frees the PGconn object
void CSphSource_PgSQL::SqlDisconnect ()
{
	if ( m_tParams.m_bPrintQueries )
		fprintf ( stdout, "SQL-DISCONNECT\n" );

	sph_PQfinish ( m_tPgDriver );
}
/// run a query; libpq buffers the full result set, so row count is known immediately
/// resets the row cursor to "before first row" (-1) for SqlFetchRow()'s pre-increment
bool CSphSource_PgSQL::SqlQuery ( const char * sQuery )
{
	m_iPgRow = -1;
	m_iPgRows = 0;

	m_pPgResult = sph_PQexec ( m_tPgDriver, sQuery );

	ExecStatusType eRes = sph_PQresultStatus ( m_pPgResult );
	if ( ( eRes!=PGRES_COMMAND_OK ) && ( eRes!=PGRES_TUPLES_OK ) )
	{
		if ( m_tParams.m_bPrintQueries )
			fprintf ( stdout, "SQL-QUERY: %s: FAIL\n", sQuery );
		return false;
	}
	if ( m_tParams.m_bPrintQueries )
		fprintf ( stdout, "SQL-QUERY: %s: ok\n", sQuery );

	m_iPgRows = sph_PQntuples ( m_pPgResult );
	return true;
}
void CSphSource_PgSQL::SqlDismissResult ()
{
	// release the pending result set, if any
	if ( m_pPgResult )
	{
		sph_PQclear ( m_pPgResult );
		m_pPgResult = NULL;
	}
}
int CSphSource_PgSQL::SqlNumFields ()
{
	// -1 when there is no pending result set
	return m_pPgResult ? sph_PQnfields ( m_pPgResult ) : -1;
}
/// fetch a column value from the current row
/// postgres returns booleans as 't'/'f'; map 't' to "1" for BOOL attrs so the
/// generic parser sees a number ('f' falls through and parses as 0)
const char * CSphSource_PgSQL::SqlColumn ( int iIndex )
{
	if ( !m_pPgResult )
		return NULL;

	const char * szValue = sph_PQgetvalue ( m_pPgResult, m_iPgRow, iIndex );
	if ( m_dIsColumnBool.GetLength() && m_dIsColumnBool[iIndex] && szValue[0]=='t' && !szValue[1] )
		return "1";

	return szValue;
}
const char * CSphSource_PgSQL::SqlFieldName ( int iIndex )
{
	// NULL when there is no pending result set
	return m_pPgResult ? sph_PQfname ( m_pPgResult, iIndex ) : NULL;
}
/// advance the cursor over the buffered result set; true while rows remain
/// relies on SqlQuery() starting the cursor at -1 (pre-increment lands on row 0)
bool CSphSource_PgSQL::SqlFetchRow ()
{
	if ( !m_pPgResult )
		return false;
	return ( ++m_iPgRow<m_iPgRows );
}
// length (in bytes) of the current row's value in the given column
DWORD CSphSource_PgSQL::SqlColumnLength ( int iIndex )
{
	return sph_PQgetlength ( m_pPgResult, m_iPgRow, iIndex );
}
/// stream a bytea column: unescape it via PQunescapeBytea into a fresh buffer
/// the returned buffer must be released with SqlCompressedColumnReleaseStream()
Str_t CSphSource_PgSQL::SqlCompressedColumnStream ( int iFieldIndex )
{
	auto tRes = SqlColumnStream ( iFieldIndex );
	if ( tRes.first )
	{
		size_t uSize;
		// PQunescapeBytea malloc's a new buffer; freed later via PQfreemem
		tRes.first = (const char*)sph_PQunescapeBytea ( (const unsigned char*)tRes.first, &uSize );
		assert ( uSize < INT_MAX );
		tRes.second = int(uSize);
	}
	return tRes;
}
void CSphSource_PgSQL::SqlCompressedColumnReleaseStream ( Str_t tStream )
{
	// the buffer came from PQunescapeBytea, so it must go back through PQfreemem
	if ( !tStream.first )
		return;
	sph_PQfreemem ( (void*)tStream.first );
}
// the fabrics
// factory: builds a pgsql source; yields nullptr when the params fail validation
CSphSource * CreateSourcePGSQL ( const CSphSourceParams_PgSQL & tParams, const char * sSourceName )
{
	auto * pSource = new CSphSource_PgSQL ( sSourceName );
	if ( !pSource->SetupPgSQL ( tParams ) )
		SafeDelete ( pSource );
	return pSource;
}
| 9,563
|
C++
|
.cpp
| 274
| 32.70438
| 107
| 0.710178
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,971
|
source_mysql.cpp
|
manticoresoftware_manticoresearch/src/indexing_sources/source_mysql.cpp
|
//
// Copyright (c) 2021-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "source_mysql.h"
#if _WIN32
#include <winsock2.h>
#endif
#include <mysql.h>
#include <config_indexer.h>
/////////////////////////////////////////////////////////////////////////////
// MYSQL SOURCE
/////////////////////////////////////////////////////////////////////////////
#ifndef MYSQL_LIB
#define MYSQL_LIB nullptr
#endif
// the MYSQL_LIB env var (if set) overrides the build-time library path
static const char * GET_MYSQL_LIB()
{
	const char * szEnv = getenv ( "MYSQL_LIB" );
	return szEnv ? szEnv : MYSQL_LIB;
}
#define MYSQL57PLUS MYSQL_VERSION_ID >= 50700
#if MYSQL57PLUS
#define MYSQL_OPTIONS_OR_SSL_SET mysql_options
#else
#define MYSQL_OPTIONS_OR_SSL_SET mysql_ssl_set
#endif
#define XSTR( s ) STR ( s )
#define STR( s ) #s
#if DL_MYSQL
static decltype (&mysql_free_result) sph_mysql_free_result = nullptr;
static decltype (&mysql_next_result) sph_mysql_next_result = nullptr;
static decltype (&mysql_use_result) sph_mysql_use_result = nullptr;
static decltype (&mysql_num_rows) sph_mysql_num_rows = nullptr;
static decltype (&mysql_query) sph_mysql_query = nullptr;
static decltype (&mysql_errno) sph_mysql_errno = nullptr;
static decltype (&mysql_error) sph_mysql_error = nullptr;
static decltype (&mysql_init) sph_mysql_init = nullptr;
static decltype ( &MYSQL_OPTIONS_OR_SSL_SET ) sph_mysql_options_or_ssl_set = nullptr;
static decltype (&mysql_real_connect) sph_mysql_real_connect = nullptr;
static decltype (&mysql_close) sph_mysql_close = nullptr;
static decltype (&mysql_num_fields) sph_mysql_num_fields = nullptr;
static decltype (&mysql_fetch_row) sph_mysql_fetch_row = nullptr;
static decltype (&mysql_fetch_fields) sph_mysql_fetch_fields = nullptr;
static decltype (&mysql_fetch_lengths) sph_mysql_fetch_lengths = nullptr;
// resolve all required libmysqlclient entry points via dlopen; returns false if
// the library or any symbol is missing (the sph_mysql_* pointers then stay null)
static bool InitDynamicMysql()
{
	// names and destinations must stay index-aligned
	const char * sFuncs[] = { "mysql_free_result", "mysql_next_result", "mysql_use_result"
		, "mysql_num_rows", "mysql_query", "mysql_errno", "mysql_error"
		, "mysql_init", XSTR ( MYSQL_OPTIONS_OR_SSL_SET ), "mysql_real_connect", "mysql_close"
		, "mysql_num_fields", "mysql_fetch_row", "mysql_fetch_fields"
		, "mysql_fetch_lengths" };

	void ** pFuncs[] = { (void **) &sph_mysql_free_result, (void **) &sph_mysql_next_result
		, (void **) &sph_mysql_use_result, (void **) &sph_mysql_num_rows, (void **) &sph_mysql_query
		, (void **) &sph_mysql_errno, (void **) &sph_mysql_error, (void **) &sph_mysql_init
		, (void **) &sph_mysql_options_or_ssl_set, (void **) &sph_mysql_real_connect, (void **) &sph_mysql_close
		, (void **) &sph_mysql_num_fields, (void **) &sph_mysql_fetch_row
		, (void **) &sph_mysql_fetch_fields, (void **) &sph_mysql_fetch_lengths };

	// static: the library is loaded once per process and kept open
	static CSphDynamicLibrary dLib ( GET_MYSQL_LIB() );
	return dLib.LoadSymbols ( sFuncs, pFuncs, sizeof ( pFuncs ) / sizeof ( void ** ) );
}
#else
// statically linked client: alias the sph_mysql_* wrappers straight to the symbols
#define sph_mysql_free_result mysql_free_result
#define sph_mysql_next_result mysql_next_result
#define sph_mysql_use_result mysql_use_result
#define sph_mysql_num_rows mysql_num_rows
#define sph_mysql_query mysql_query
#define sph_mysql_errno mysql_errno
#define sph_mysql_error mysql_error
#define sph_mysql_init mysql_init
// must follow the MySQL version switch above: mysql_options on 5.7+, mysql_ssl_set
// before. Hard-coding mysql_ssl_set here (as before) broke static builds against
// MySQL 5.7+, whose call site passes mysql_options-style (option, value) arguments.
#define sph_mysql_options_or_ssl_set MYSQL_OPTIONS_OR_SSL_SET
#define sph_mysql_real_connect mysql_real_connect
#define sph_mysql_close mysql_close
#define sph_mysql_num_fields mysql_num_fields
#define sph_mysql_fetch_row mysql_fetch_row
#define sph_mysql_fetch_fields mysql_fetch_fields
#define sph_mysql_fetch_lengths mysql_fetch_lengths
#define InitDynamicMysql() (true)
#endif
/// MySQL source implementation
/// multi-field plain-text documents fetched from given query
struct CSphSource_MySQL : CSphSource_SQL
{
	explicit CSphSource_MySQL ( const char * sName );
	bool SetupMySQL ( const CSphSourceParams_MySQL & tParams );

protected:
	MYSQL_RES * m_pMysqlResult = nullptr;		///< pending (streamed) result set
	MYSQL_FIELD * m_pMysqlFields = nullptr;		///< lazily fetched field metadata
	MYSQL_ROW m_tMysqlRow = nullptr;			///< current row
	MYSQL m_tMysqlDriver;						///< connection handle (embedded, not pointer)
	unsigned long * m_pMysqlLengths = nullptr;	///< per-column byte lengths of current row

	CSphString m_sMysqlUsock;					///< unix socket path, if any
	unsigned long m_iMysqlConnectFlags = 0;		///< extra CLIENT_* flags for mysql_real_connect

	// SSL material, forwarded to mysql_options/mysql_ssl_set on connect
	CSphString m_sSslKey;
	CSphString m_sSslCert;
	CSphString m_sSslCA;

protected:
	// CSphSource_SQL driver interface, implemented on top of libmysqlclient
	void SqlDismissResult () override;
	bool SqlQuery ( const char * sQuery ) override;
	bool SqlIsError () override;
	const char * SqlError () override;
	bool SqlConnect () override;
	void SqlDisconnect () override;
	int SqlNumFields() override;
	bool SqlFetchRow() override;
	DWORD SqlColumnLength ( int iIndex ) override;
	const char * SqlColumn ( int iIndex ) override;
	const char * SqlFieldName ( int iIndex ) override;
};
// defaults: no extra connect flags, standard mysql port
CSphSourceParams_MySQL::CSphSourceParams_MySQL ()
	: m_iFlags ( 0 )
{
	m_uPort = 3306;
}
CSphSource_MySQL::CSphSource_MySQL ( const char * sName )
	: CSphSource_SQL ( sName )
{
	m_bCanUnpack = true;
	// MYSQL is a plain C struct; zero it before mysql_init gets to see it
	memset ( &m_tMysqlDriver, 0, sizeof ( m_tMysqlDriver ) );
}
/// free the current result set, and drain any extra result sets a stored
/// procedure may have produced (they are dismissed with a one-time warning)
void CSphSource_MySQL::SqlDismissResult ()
{
	if ( !m_pMysqlResult )
		return;

	while ( m_pMysqlResult )
	{
		sph_mysql_free_result ( m_pMysqlResult );
		m_pMysqlResult = NULL;

		// stored procedures might return multiple result sets
		// FIXME? we might want to index all of them
		// but for now, let's simply dismiss additional result sets
		if ( sph_mysql_next_result ( &m_tMysqlDriver )==0 )
		{
			m_pMysqlResult = sph_mysql_use_result ( &m_tMysqlDriver );

			// warn once per process, not per dismissed set
			static bool bOnce = false;
			if ( !bOnce && m_pMysqlResult && sph_mysql_num_rows ( m_pMysqlResult ) )
			{
				sphWarn ( "indexing of multiple result sets is not supported yet; some results sets were dismissed!" );
				bOnce = true;
			}
		}
	}

	m_pMysqlFields = nullptr;
	m_pMysqlLengths = nullptr;
}
/// issue a query; on success start streaming its result set (mysql_use_result
/// avoids buffering the whole set client-side)
bool CSphSource_MySQL::SqlQuery ( const char * sQuery )
{
	if ( sph_mysql_query ( &m_tMysqlDriver, sQuery ) )
	{
		if ( m_tParams.m_bPrintQueries )
			fprintf ( stdout, "SQL-QUERY: %s: FAIL\n", sQuery );
		return false;
	}
	if ( m_tParams.m_bPrintQueries )
		fprintf ( stdout, "SQL-QUERY: %s: ok\n", sQuery );

	m_pMysqlResult = sph_mysql_use_result ( &m_tMysqlDriver );
	m_pMysqlFields = nullptr; // field metadata is re-fetched lazily for the new result set
	return true;
}
/// true when the last mysql call left a non-zero error code on the connection
bool CSphSource_MySQL::SqlIsError ()
{
	return sph_mysql_errno ( &m_tMysqlDriver )!=0;
}
/// last error message from the driver
const char * CSphSource_MySQL::SqlError ()
{
	// with dynamic loading sph_mysql_error is a function pointer that may be
	// null if dlopen/dlsym failed; report that case explicitly
	if ( sph_mysql_error!=nullptr )
		return sph_mysql_error ( &m_tMysqlDriver );

	return "MySQL source wasn't initialized. Wrong name in dlopen?";
}
/// establish the connection: load the client library (when dynamic), apply SSL
/// options if configured, and connect with CLIENT_MULTI_RESULTS enabled
bool CSphSource_MySQL::SqlConnect ()
{
	if_const ( !InitDynamicMysql() )
	{
		if ( m_tParams.m_bPrintQueries )
			fprintf ( stdout, "SQL-CONNECT: FAIL (NO MYSQL CLIENT LIB, tried %s)\n", GET_MYSQL_LIB() );
		return false;
	}

	sph_mysql_init ( &m_tMysqlDriver );
	if ( !m_sSslKey.IsEmpty() || !m_sSslCert.IsEmpty() || !m_sSslCA.IsEmpty() )
	{
// mysql 5.7+ replaced mysql_ssl_set() with per-option mysql_options() calls
#if MYSQL57PLUS
		sph_mysql_options_or_ssl_set ( &m_tMysqlDriver, MYSQL_OPT_SSL_KEY, m_sSslKey.cstr() );
		sph_mysql_options_or_ssl_set ( &m_tMysqlDriver, MYSQL_OPT_SSL_CERT, m_sSslCert.cstr() );
		sph_mysql_options_or_ssl_set ( &m_tMysqlDriver, MYSQL_OPT_SSL_CA, m_sSslCA.cstr() );
#else
		sph_mysql_options_or_ssl_set ( &m_tMysqlDriver, m_sSslKey.cstr(), m_sSslCert.cstr(), m_sSslCA.cstr(), NULL, NULL );
#endif
	}

	m_iMysqlConnectFlags |= CLIENT_MULTI_RESULTS; // we now know how to handle this
	bool bRes = ( nullptr!=sph_mysql_real_connect ( &m_tMysqlDriver,
		m_tParams.m_sHost.cstr(), m_tParams.m_sUser.cstr(), m_tParams.m_sPass.cstr(),
		m_tParams.m_sDB.cstr(), m_tParams.m_uPort, m_sMysqlUsock.cstr(), m_iMysqlConnectFlags ) );
	if ( m_tParams.m_bPrintQueries )
		fprintf ( stdout, bRes ? "SQL-CONNECT: ok\n" : "SQL-CONNECT: FAIL\n" );
	return bRes;
}
/// close the connection and flush the collected query dump (sql_query dump file, or stdout)
void CSphSource_MySQL::SqlDisconnect ()
{
	// NOTE(review): the result set pointer is dropped without mysql_free_result();
	// presumably SqlDismissResult() has always run by this point - confirm
	m_pMysqlResult = nullptr;

	if ( m_tParams.m_bPrintQueries )
		fprintf ( stdout, "SQL-DISCONNECT\n" );

	sph_mysql_close ( &m_tMysqlDriver );

	// flush accumulated dump-rows output, if any
	m_sCollectDump.FinishBlocks ();
	auto fpDump = m_fpDumpRows;
	if ( !fpDump )
		fpDump = stdout;
	if ( m_sCollectDump.GetLength ()>0)
		fprintf ( fpDump, "%s", m_sCollectDump.cstr ());
	m_sCollectDump.Clear ();
}
/// column count of the current result set, or -1 when there is no result set
int CSphSource_MySQL::SqlNumFields ()
{
	return m_pMysqlResult ? (int) sph_mysql_num_fields ( m_pMysqlResult ) : -1;
}
/// advance to the next row of the streamed result set; false on end-of-set or error
bool CSphSource_MySQL::SqlFetchRow ()
{
	if ( !m_pMysqlResult )
		return false;

	return ( m_tMysqlRow = sph_mysql_fetch_row ( m_pMysqlResult ) )!=nullptr;
}
/// raw value of the iIndex-th column in the current row (may itself be null for SQL NULL)
const char * CSphSource_MySQL::SqlColumn ( int iIndex )
{
	return m_pMysqlResult ? m_tMysqlRow[iIndex] : nullptr;
}
/// name of the iIndex-th column; field metadata is fetched lazily, once per result set
const char * CSphSource_MySQL::SqlFieldName ( int iIndex )
{
	if ( !m_pMysqlResult )
		return NULL;

	if ( !m_pMysqlFields )
		m_pMysqlFields = sph_mysql_fetch_fields ( m_pMysqlResult );

	return m_pMysqlFields[iIndex].name;
}
/// byte length of the iIndex-th column in the current row; lengths are fetched lazily, once per row
DWORD CSphSource_MySQL::SqlColumnLength ( int iIndex )
{
	if ( !m_pMysqlResult )
		return 0;

	if ( !m_pMysqlLengths )
		m_pMysqlLengths = sph_mysql_fetch_lengths ( m_pMysqlResult );

	return m_pMysqlLengths[iIndex];
}
/// store MySQL-specific settings on top of the generic SQL setup
bool CSphSource_MySQL::SetupMySQL ( const CSphSourceParams_MySQL & tParams )
{
	if ( !CSphSource_SQL::SetupSQL ( tParams ) )
		return false;

	m_sMysqlUsock = tParams.m_sUsock;
	m_iMysqlConnectFlags = tParams.m_iFlags;
	m_sSslKey = tParams.m_sSslKey;
	m_sSslCert = tParams.m_sSslCert;
	m_sSslCA = tParams.m_sSslCA;

	// build and store DSN for error reporting;
	// SetupSQL() built "sql://user:***@host:port/db", so skipping the first
	// 3 chars ("sql") and prefixing "mysql" yields "mysql://..."
	char sBuf [ 1024 ];
	snprintf ( sBuf, sizeof(sBuf), "mysql%s", m_sSqlDSN.cstr()+3 );
	m_sSqlDSN = sBuf;

	return true;
}
// the fabrics
/// create and set up a MySQL source; returns nullptr on setup failure
/// (SafeDelete() nulls the pointer, which is then returned)
CSphSource * CreateSourceMysql ( const CSphSourceParams_MySQL & tParams, const char * sSourceName )
{
	auto * pSrc = new CSphSource_MySQL ( sSourceName );
	if ( !pSrc->SetupMySQL ( tParams ) )
		SafeDelete ( pSrc );

	return pSrc;
}
| 10,001
|
C++
|
.cpp
| 277
| 33.913357
| 117
| 0.70927
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,972
|
source_svpipe.cpp
|
manticoresoftware_manticoresearch/src/indexing_sources/source_svpipe.cpp
|
//
// Copyright (c) 2021-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "source_svpipe.h"
#include "indexcheck.h"
#include "schema_configurator.h"
#include "attribute.h"
#include "sphinxint.h"
#include "conversion.h"
#include "tokenizer/tokenizer.h"
/// common base for TSV/CSV pipe sources: reads separated-value rows from a pipe,
/// buffering and splitting them into null-terminated columns for indexing
class CSphSource_BaseSV : public CSphSource, public CSphSchemaConfigurator<CSphSource_BaseSV>
{
	using BASE = CSphSource;

public:
	explicit CSphSource_BaseSV ( const char * sName );
	~CSphSource_BaseSV () override;

	bool Connect ( CSphString & sError ) override; ///< run the command and open the pipe
	void Disconnect () override; ///< close the pipe
	const char * DecorateMessage ( const char * sTemplate, ... ) const __attribute__ ( ( format ( printf, 2, 3 ) ) );

	bool IterateStart ( CSphString & ) override; ///< Connect() starts getting documents automatically, so this one is empty
	BYTE ** NextDocument ( bool & bEOF, CSphString & ) override; ///< parse incoming chunk and emit some hits
	const int * GetFieldLengths () const override { return m_dFieldLengths.Begin(); }

	// SV sources have no MVA-query or kill-list iteration
	bool IterateMultivaluedStart ( int, CSphString & ) override { return false; }
	bool IterateMultivaluedNext ( int64_t &, int64_t & ) override{ return false; }
	bool IterateKillListStart ( CSphString & ) override { return false; }
	bool IterateKillListNext ( DocID_t & ) override { return false; }
	void Setup ( const CSphSourceSettings & tSettings, StrVec_t * pWarnings ) override;

	bool SetupPipe ( const CSphConfigSection & hSource, FILE * pPipe, CSphString & sError );

protected:
	enum ESphParseResult
	{
		PARSING_FAILED,
		GOT_DOCUMENT,
		DATA_OVER
	};

	CSphVector<BYTE> m_dBuf;				// raw read buffer; columns are parsed in place
	CSphFixedVector<char> m_dError {1024};	// scratch buffer for DecorateMessage()
	CSphFixedVector<int> m_dColumnsLen {0};	// per-column byte length of the last parsed row
	CSphVector<RemapXSV_t> m_dRemap;		// input column order -> schema field/attr mapping

	// output
	CSphFixedVector<BYTE *> m_dFields {0};	// per-field pointers into m_dBuf
	CSphFixedVector<int> m_dFieldLengths {0};

	FILE * m_pFP = nullptr;
	int m_iDataStart = 0;		///< where the next line to parse starts in m_dBuf
	int m_iDocStart = 0;		///< where the last parsed document stats in m_dBuf
	int m_iBufUsed = 0;			///< bytes [0,m_iBufUsed) are actually currently used; the rest of m_dBuf is free
	int m_iLine = 0;			// 0-based input line counter, for error reporting

	BYTE ** ReportDocumentError();
	virtual bool SetupSchema ( const CSphConfigSection & hSource, bool bWordDict, CSphString & sError ) = 0;
	virtual ESphParseResult SplitColumns ( CSphString & ) = 0;

private:
	bool StoreAttribute ( int iAttr, int iOff );
};
/// tab-separated-values source; column separator is '\t', no quoting/escaping
class CSphSource_TSV : public CSphSource_BaseSV
{
	using CSphSource_BaseSV::CSphSource_BaseSV;
public:
	ESphParseResult SplitColumns ( CSphString & sError ) final; ///< parse incoming chunk and emit some hits
	bool SetupSchema ( const CSphConfigSection & hSource, bool bWordDict, CSphString & sError ) final;
};
/// comma-separated-values source; supports configurable single-char delimiter,
/// double-quoted fields and backslash escaping
class CSphSource_CSV : public CSphSource_BaseSV
{
public:
	explicit CSphSource_CSV ( const char * sName );
	ESphParseResult SplitColumns ( CSphString & sError ) final; ///< parse incoming chunk and emit some hits
	bool SetupSchema ( const CSphConfigSection & hSource, bool bWordDict, CSphString & sError ) final;
	void SetDelimiter ( const char * sDelimiter );

private:
	BYTE m_iDelimiter; // column separator byte; defaults to ','
};
/// create a TSV-pipe source; on setup failure prints the error to stdout and
/// returns nullptr (SafeDelete nulls the pointer)
CSphSource * sphCreateSourceTSVpipe ( const CSphConfigSection * pSource, FILE * pPipe, const char * sSourceName )
{
	CSphString sError;
	auto * pTSV = new CSphSource_TSV(sSourceName);
	if ( !pTSV->SetupPipe ( *pSource, pPipe, sError ) )
	{
		SafeDelete ( pTSV );
		fprintf ( stdout, "ERROR: tsvpipe: %s", sError.cstr() );
	}

	return pTSV;
}
/// create a CSV-pipe source with the configured delimiter; on setup failure
/// prints the error to stdout and returns nullptr (SafeDelete nulls the pointer)
CSphSource * sphCreateSourceCSVpipe ( const CSphConfigSection * pSource, FILE * pPipe, const char * sSourceName )
{
	CSphString sError;
	auto sDelimiter = pSource->GetStr ( "csvpipe_delimiter" );
	auto * pCSV = new CSphSource_CSV(sSourceName);
	pCSV->SetDelimiter ( sDelimiter.cstr() );
	if ( !pCSV->SetupPipe ( *pSource, pPipe, sError ) )
	{
		SafeDelete ( pCSV );
		fprintf ( stdout, "ERROR: csvpipe: %s", sError.cstr() );
	}

	return pCSV;
}
CSphSource_BaseSV::CSphSource_BaseSV ( const char * sName )
	: CSphSource ( sName )
{}
// make sure the pipe is closed even if the caller never disconnected explicitly
CSphSource_BaseSV::~CSphSource_BaseSV ()
{
	Disconnect();
}
/// attach the pipe, build the schema from config, and compute the mapping
/// between input column order (config declaration order) and schema fields/attrs
bool CSphSource_BaseSV::SetupPipe ( const CSphConfigSection & hSource, FILE * pPipe, CSphString & sError )
{
	m_pFP = pPipe;
	m_tSchema.Reset ();
	bool bWordDict = ( m_pDict && m_pDict->GetSettings().m_bWordDict );

	if ( !SetupSchema ( hSource, bWordDict, sError ) )
		return false;

	if ( !DebugCheckSchema ( m_tSchema, sError ) )
		return false;

	if ( !AddAutoAttrs ( sError ) )
		return false;

	int nFields = m_tSchema.GetFieldsCount();
	m_dFields.Reset ( nFields );
	m_dFieldLengths.Reset ( nFields );

	// build hash from schema names
	SmallStringHash_T<RemapXSV_t> hSchema;

	// register all full-text fields first ...
	for ( int i=0; i < m_tSchema.GetFieldsCount(); i++ )
	{
		RemapXSV_t tField;
		tField.m_iField = i;
		hSchema.Add ( tField, m_tSchema.GetFieldName(i) );
	}

	// ... then attributes; a name may be both a field and an attribute (field_string)
	for ( int i=0; i<m_tSchema.GetAttrsCount(); i++ )
	{
		const CSphString & sAttrName = m_tSchema.GetAttr(i).m_sName;
		RemapXSV_t * pRemap = hSchema ( sAttrName );
		if ( pRemap )
			pRemap->m_iAttr = i;
		else
		{
			RemapXSV_t tAttr;
			tAttr.m_iAttr = i;
			hSchema.Add ( tAttr, sAttrName );
		}
	}

	// restore order for declared columns
	// the config section preserves declaration order via CSphVariant::m_iTag
	CSphString sColumn;
	for ( const auto& tVal : hSource )
	{
		const CSphVariant * pVal = &tVal.second;
		while ( pVal )
		{
			sColumn = pVal->strval();
			// uint attribute might have bit count that should by cut off from name
			const char * pColon = strchr ( sColumn.cstr(), ':' );
			if ( pColon )
			{
				int iColon = int ( pColon-sColumn.cstr() );
				CSphString sTmp;
				sTmp.SetBinary ( sColumn.cstr(), iColon );
				sColumn.Swap ( sTmp );
			}

			// let's handle different char cases
			sColumn.ToLower();

			RemapXSV_t * pColumn = hSchema ( sColumn );
			assert ( !pColumn || pColumn->m_iAttr>=0 || pColumn->m_iField>=0 );
			assert ( !pColumn || pColumn->m_iTag==-1 );
			if ( pColumn )
				pColumn->m_iTag = pVal->m_iTag;

			pVal = pVal->m_pNext;
		}
	}

	// the document id is always the first input column
	RemapXSV_t * pIdCol = hSchema ( sphGetDocidName() );
	assert ( pIdCol && pIdCol->m_iTag==-1 && pIdCol->m_iAttr==0 );
	pIdCol->m_iTag = 0;

	// keep only columns actually present in the input, in declaration order
	for ( const auto& tCol : hSchema )
		if ( tCol.second.m_iTag>=0 )
			m_dRemap.Add ( tCol.second );

	m_dColumnsLen.Reset ( m_dRemap.GetLength() );
	sphSort ( m_dRemap.Begin(), m_dRemap.GetLength(), bind ( &RemapXSV_t::m_iTag ) );

	return true;
}
/// apply source settings; additionally fix up the remap table if the blob row
/// locator attribute disappeared during base setup
void CSphSource_BaseSV::Setup ( const CSphSourceSettings & tSettings, StrVec_t * pWarnings )
{
	// detect a case when blob row locator was removed (because all blobs became columnar)
	bool bHadLocator = !!m_tSchema.GetAttr ( sphGetBlobLocatorName() );
	BASE::Setup ( tSettings, pWarnings );
	bool bHaveLocator = !!m_tSchema.GetAttr ( sphGetBlobLocatorName() );

	if ( bHadLocator && !bHaveLocator )
	{
		// locator was attr #1 (right after the docid); shift all later attr indexes down
		const int iBlobLocatorId = 1;
		for ( auto & i : m_dRemap )
			if ( i.m_iAttr>iBlobLocatorId )
				i.m_iAttr--;
	}
}
/// finalize schema (wordparts, auto attrs) and allocate working buffers;
/// the pipe itself was already attached in SetupPipe()
bool CSphSource_BaseSV::Connect ( CSphString & sError )
{
	// source settings have been updated after ::Setup
	for ( int i = 0; i < m_tSchema.GetFieldsCount(); i++ )
	{
		ESphWordpart eWordpart = GetWordpart ( m_tSchema.GetFieldName(i), m_pDict && m_pDict->GetSettings().m_bWordDict );
		m_tSchema.SetFieldWordpart ( i, eWordpart );
	}

	// NOTE(review): AddAutoAttrs() was already called in SetupPipe(); presumably
	// it is idempotent - confirm
	if ( !AddAutoAttrs ( sError ) )
		return false;
	AllocDocinfo();

	m_tHits.Reserve ( m_iMaxHits );
	m_dBuf.Resize ( DEFAULT_READ_BUFFER );

	return true;
}
/// close the feeding pipe (pclose matches the popen done by the caller) and drop buffered hits
void CSphSource_BaseSV::Disconnect()
{
	if ( m_pFP )
	{
		pclose ( m_pFP );
		m_pFP = nullptr;
	}
	m_tHits.Reset();
}
/// printf-style message formatting into the member scratch buffer;
/// the returned pointer stays valid only until the next call
const char * CSphSource_BaseSV::DecorateMessage ( const char * sTemplate, ... ) const
{
	va_list ap;
	va_start ( ap, sTemplate );
	vsnprintf ( m_dError.Begin (), m_dError.GetLength (), sTemplate, ap );
	va_end ( ap );
	return m_dError.Begin();
}
// UTF-8 byte order mark
static const BYTE g_dBOM[] = { 0xEF, 0xBB, 0xBF };

/// reset parse state and do the initial buffer fill from the pipe
bool CSphSource_BaseSV::IterateStart ( CSphString & sError )
{
	m_iLine = 0;
	m_iDataStart = 0;

	// initial buffer update
	m_iBufUsed = (int) fread ( m_dBuf.Begin(), 1, m_dBuf.GetLength(), m_pFP );
	if ( !m_iBufUsed )
	{
		sError.SetSprintf ( "source '%s': read error '%s'", m_tSchema.GetName(), strerrorm(errno) );
		return false;
	}
	m_iPlainFieldsLength = m_tSchema.GetFieldsCount();

	// space out BOM like xml-pipe does
	// NOTE(review): the '>' (not '>=') comparison leaves a BOM-only input unspaced - confirm intended
	if ( m_iBufUsed>(int)sizeof(g_dBOM) && memcmp ( m_dBuf.Begin(), g_dBOM, sizeof ( g_dBOM ) )==0 )
		memset ( m_dBuf.Begin(), ' ', sizeof(g_dBOM) );

	return true;
}
/// mark the iteration as failed and stop: rowid is set to 0 (anything but
/// INVALID_ROWID, which would signal a legal EOF to the caller) and the
/// buffer state is reset
BYTE ** CSphSource_BaseSV::ReportDocumentError ()
{
	m_tDocInfo.m_tRowID = 0; // INVALID_ROWID means legal eof
	m_iDataStart = 0;
	m_iBufUsed = 0;
	return nullptr;
}
/// store the iAttr-th remapped column (its value starts at byte iOff of m_dBuf)
/// into the field pointers and/or the docinfo row; returns false to skip the
/// whole document (currently only on a malformed document id)
bool CSphSource_BaseSV::StoreAttribute ( int iAttr, int iOff )
{
	// if+if for field-string attribute case
	const RemapXSV_t & tRemap = m_dRemap[iAttr];

	// field column
	if ( tRemap.m_iField!=-1 )
	{
		m_dFields[tRemap.m_iField] = m_dBuf.Begin() + iOff;
		m_dFieldLengths[tRemap.m_iField] = (int) strlen ( (char *)m_dFields[tRemap.m_iField] );
	}

	// attribute column
	if ( tRemap.m_iAttr==-1 )
		return true;

	const CSphColumnInfo & tAttr = m_tSchema.GetAttr ( tRemap.m_iAttr );
	const char * sVal = (const char *)m_dBuf.Begin() + iOff;
	CSphString & sCurStrAttr = m_dStrAttrs[tRemap.m_iAttr];
	SphAttr_t & tCurIntAttr = m_dAttrs[tRemap.m_iAttr];

	switch ( tAttr.m_eAttrType )
	{
	case SPH_ATTR_STRING:
	case SPH_ATTR_JSON:
		sCurStrAttr = sVal;
		break;

	case SPH_ATTR_FLOAT:
		{
			float fValue = sphToFloat(sVal);
			tCurIntAttr = sphF2DW(fValue);

			// columnar attrs are written from tCurIntAttr later; row-wise ones go straight into docinfo
			if ( !tAttr.IsColumnar() )
				m_tDocInfo.SetAttrFloat ( tAttr.m_tLocator, fValue );
		}
		break;

	case SPH_ATTR_BIGINT:
		{
			CSphString sWarn;
			if ( tRemap.m_iAttr )
			{
				// ordinary bigint: parse errors only warn, the document is kept
				tCurIntAttr = sphToInt64 ( sVal, &sWarn );
				if ( !sWarn.IsEmpty() )
					sphWarn ( "%s", sWarn.cstr() );
			}
			else
			{
				// attr #0 is the document id; a malformed id skips the document
				tCurIntAttr = (int64_t)StrToDocID ( sVal, sWarn );
				if ( !sWarn.IsEmpty() )
				{
					sphWarn ( "%s", sWarn.cstr() );
					return false;
				}
			}

			if ( !tAttr.IsColumnar() )
				m_tDocInfo.SetAttr ( tAttr.m_tLocator, tCurIntAttr );
		}
		break;

	case SPH_ATTR_UINT32SET:
	case SPH_ATTR_INT64SET:
		ParseFieldMVA ( tRemap.m_iAttr, sVal );
		break;

	case SPH_ATTR_TOKENCOUNT:
		// filled in later by the tokenizer; reset here
		m_tDocInfo.SetAttr ( tAttr.m_tLocator, 0 );
		break;

	case SPH_ATTR_BOOL:
		tCurIntAttr = sphToDword(sVal) ? 1 : 0;
		if ( !tAttr.IsColumnar() )
			m_tDocInfo.SetAttr ( tAttr.m_tLocator, tCurIntAttr );
		break;

	default:
		// plain integer-ish attrs
		tCurIntAttr = sphToDword(sVal);
		if ( !tAttr.IsColumnar() )
			m_tDocInfo.SetAttr ( tAttr.m_tLocator, tCurIntAttr );
		break;
	}

	return true;
}
/// fetch and parse the next document: split the next row into columns, then
/// store each column; rows with a malformed docid are skipped and parsing
/// continues with the following row
BYTE ** CSphSource_BaseSV::NextDocument ( bool & bEOF, CSphString & sError )
{
	bEOF = false;
	bool bSkipDoc = false;
	do
	{
		ESphParseResult eRes = SplitColumns ( sError );
		if ( eRes==PARSING_FAILED )
			return ReportDocumentError();
		else if ( eRes==DATA_OVER )
		{
			bEOF = true;
			return nullptr;
		}

		assert ( eRes==GOT_DOCUMENT );

		// reset per-document MVA accumulators
		m_dMvas.Resize ( m_tSchema.GetAttrsCount() );
		for ( auto & i : m_dMvas )
			i.Resize(0);

		// walk the columns of this row; each is null-terminated in m_dBuf
		int iOff = m_iDocStart;
		bSkipDoc = false;
		ARRAY_FOREACH ( i, m_dRemap )
		{
			if ( !StoreAttribute ( i, iOff ) )
			{
				bSkipDoc = true;
				break;
			}

			iOff += m_dColumnsLen[i] + 1; // length of value plus null-terminator
		}

		m_iLine++;
	}
	while ( bSkipDoc );

	return m_dFields.Begin();
}
/// TSV parser: split the next buffered row into null-terminated columns
/// columns are separated by '\t', rows end at '\r'/'\n' (or a stray NUL);
/// refills m_dBuf from the pipe (moving/growing it) whenever data runs out;
/// on success m_dColumnsLen[] holds each column's length and m_iDocStart
/// points at the start of the row inside m_dBuf
CSphSource_BaseSV::ESphParseResult CSphSource_TSV::SplitColumns ( CSphString & sError )
{
	int iColumns = m_dRemap.GetLength();
	int iCol = 0;
	int iColumnStart = m_iDataStart;
	BYTE * pData = m_dBuf.Begin() + m_iDataStart;
	const BYTE * pEnd = m_dBuf.Begin() + m_iBufUsed;
	m_iDocStart = m_iDataStart;

	while (true)
	{
		if ( iCol>=iColumns )
		{
			sError.SetSprintf ( "source '%s': too many columns found (found=%d, declared=%d, line=%d)", m_tSchema.GetName(), iCol, iColumns, m_iLine );
			return CSphSource_BaseSV::PARSING_FAILED;
		}

		// move to next control symbol
		while ( pData<pEnd && *pData && *pData!='\t' && *pData!='\r' && *pData!='\n' )
			pData++;

		if ( pData<pEnd )
		{
			assert ( *pData=='\t' || !*pData || *pData=='\r' || *pData=='\n' );
			bool bNull = !*pData;
			bool bEOL = ( *pData=='\r' || *pData=='\n' );

			// close the current column: record its length and null-terminate it in place
			int iLen = int ( pData - m_dBuf.Begin() ) - iColumnStart;
			assert ( iLen>=0 );
			m_dColumnsLen[iCol] = iLen;
			*pData++ = '\0';
			iCol++;

			if ( bNull )
			{
				// null terminated string found
				m_iDataStart = m_iBufUsed = 0;
				break;
			} else if ( bEOL )
			{
				// end of document found
				// skip all EOL characters
				while ( pData<pEnd && *pData && ( *pData=='\r' || *pData=='\n' ) )
					pData++;
				break;
			}

			// column separator found
			iColumnStart = int ( pData - m_dBuf.Begin() );
			continue;
		}

		int iOff = int ( pData - m_dBuf.Begin() );

		// if there is space at the start, move data around
		// if not, resize the buffer
		if ( m_iDataStart>0 )
		{
			memmove ( m_dBuf.Begin(), m_dBuf.Begin() + m_iDataStart, m_iBufUsed - m_iDataStart );
			m_iBufUsed -= m_iDataStart;
			iOff -= m_iDataStart;
			iColumnStart -= m_iDataStart;
			m_iDataStart = 0;
			m_iDocStart = 0;
		} else if ( m_iBufUsed==m_dBuf.GetLength() )
		{
			m_dBuf.Resize ( m_dBuf.GetLength()*2 );
		}

		// do read
		auto iGot = (int) fread ( m_dBuf.Begin() + m_iBufUsed, 1, m_dBuf.GetLength() - m_iBufUsed, m_pFP );
		if ( !iGot )
		{
			if ( !iCol )
			{
				// normal file termination - no pending columns and documents
				m_iDataStart = m_iBufUsed = 0;
				m_tDocInfo.m_tRowID = INVALID_ROWID;
				return CSphSource_BaseSV::DATA_OVER;
			}

			// error in case no data left in middle of data stream
			// (strerrorm is the thread-safe errno formatter used elsewhere in this file)
			sError.SetSprintf ( "source '%s': read error '%s' (line=%d)", m_tSchema.GetName(), strerrorm(errno), m_iLine );
			return CSphSource_BaseSV::PARSING_FAILED;
		}
		m_iBufUsed += iGot;

		// restored pointers after buffer resize
		pData = m_dBuf.Begin() + iOff;
		pEnd = m_dBuf.Begin() + m_iBufUsed;
	}

	// all columns presence check
	if ( iCol!=iColumns )
	{
		sError.SetSprintf ( "source '%s': not all columns found (found=%d, total=%d, line=%d)", m_tSchema.GetName(), iCol, iColumns, m_iLine );
		return CSphSource_BaseSV::PARSING_FAILED;
	}

	// tail data
	assert ( pData<=pEnd );
	m_iDataStart = int ( pData - m_dBuf.Begin() );
	return CSphSource_BaseSV::GOT_DOCUMENT;
}
/// build the schema from tsvpipe_* config directives; attrs first, then fields
/// (note that tsvpipe_field_string registers the column both as attr and field)
bool CSphSource_TSV::SetupSchema ( const CSphConfigSection & hSource, bool bWordDict, CSphString & sError )
{
	bool bOk = true;
	bOk &= ConfigureAttrs ( hSource("tsvpipe_attr_uint"), SPH_ATTR_INTEGER, m_tSchema, sError );
	bOk &= ConfigureAttrs ( hSource("tsvpipe_attr_timestamp"), SPH_ATTR_TIMESTAMP, m_tSchema, sError );
	bOk &= ConfigureAttrs ( hSource("tsvpipe_attr_bool"), SPH_ATTR_BOOL, m_tSchema, sError );
	bOk &= ConfigureAttrs ( hSource("tsvpipe_attr_float"), SPH_ATTR_FLOAT, m_tSchema, sError );
	bOk &= ConfigureAttrs ( hSource("tsvpipe_attr_bigint"), SPH_ATTR_BIGINT, m_tSchema, sError );
	bOk &= ConfigureAttrs ( hSource("tsvpipe_attr_multi"), SPH_ATTR_UINT32SET, m_tSchema, sError );
	bOk &= ConfigureAttrs ( hSource("tsvpipe_attr_multi_64"), SPH_ATTR_INT64SET, m_tSchema, sError );
	bOk &= ConfigureAttrs ( hSource("tsvpipe_attr_string"), SPH_ATTR_STRING, m_tSchema, sError );
	bOk &= ConfigureAttrs ( hSource("tsvpipe_attr_json"), SPH_ATTR_JSON, m_tSchema, sError );
	bOk &= ConfigureAttrs ( hSource("tsvpipe_field_string"), SPH_ATTR_STRING, m_tSchema, sError );

	if ( !bOk )
		return false;

	ConfigureFields ( hSource("tsvpipe_field"), bWordDict, m_tSchema );
	ConfigureFields ( hSource("tsvpipe_field_string"), bWordDict, m_tSchema );

	return true;
}
CSphSource_CSV::CSphSource_CSV ( const char * sName )
	: CSphSource_BaseSV ( sName )
{
	m_iDelimiter = BYTE ( ',' ); // default separator; may be overridden via SetDelimiter()
}
/// CSV parser: split the next buffered row into null-terminated columns,
/// handling double-quoted fields ("" escapes a quote inside quotes) and
/// backslash escapes outside quotes; unquoting is done in place, so the
/// write cursor (d) trails the read cursor (s) within the same buffer;
/// refills m_dBuf from the pipe whenever data runs out
CSphSource_BaseSV::ESphParseResult CSphSource_CSV::SplitColumns ( CSphString & sError )
{
	int iColumns = m_dRemap.GetLength();
	int iCol = 0;
	int iColumnStart = m_iDataStart;
	int iQuotPrev = -1;			// buffer offset of the previous '"', for detecting "" pairs
	int iEscapeStart = -1;		// buffer offset of a pending '\\', for detecting escaped chars
	const BYTE * s = m_dBuf.Begin() + m_iDataStart; // parse this line
	BYTE * d = m_dBuf.Begin() + m_iDataStart; // do parsing in place
	const BYTE * pEnd = m_dBuf.Begin() + m_iBufUsed; // until we reach the end of current buffer
	m_iDocStart = m_iDataStart;
	bool bOnlySpace = true;		// only whitespace seen so far in the current column (a quote may open it)
	bool bQuoted = false;		// currently inside a double-quoted section
	bool bHasQuot = false;		// the current column had any quoting at all

	while (true)
	{
		assert ( d<=s );

		// move to next control symbol
		while ( s<pEnd && *s && *s!=m_iDelimiter && *s!='"' && *s!='\\' && *s!='\r' && *s!='\n' )
		{
			bOnlySpace &= sphIsSpace ( *s );
			*d++ = *s++;
		}

		if ( s<pEnd )
		{
			assert ( !*s || *s==m_iDelimiter || *s=='"' || *s=='\\' || *s=='\r' || *s=='\n' );
			bool bNull = !*s;
			bool bEOL = ( *s=='\r' || *s=='\n' );
			bool bDelimiter = ( *s==m_iDelimiter );
			bool bQuot = ( *s=='"' );
			bool bEscape = ( *s=='\\' );
			int iOff = int ( s - m_dBuf.Begin() );
			bool bEscaped = ( iEscapeStart>=0 && iEscapeStart+1==iOff );

			// escape symbol outside double quotation
			if ( !bQuoted && !bDelimiter && ( bEscape || bEscaped ) )
			{
				if ( bEscaped ) // next to escape symbol proceed as regular
				{
					*d++ = *s++;
				} else // escape just started
				{
					iEscapeStart = iOff;
					s++;
				}
				continue;
			}

			// double quote processing
			// [ " ... " ]
			// [ " ... "" ... " ]
			// [ " ... """ ]
			// [ " ... """" ... " ]
			// any symbol inside double quote proceed as regular
			// but quoted quote proceed as regular symbol
			if ( bQuot )
			{
				if ( bOnlySpace && iQuotPrev==-1 )
				{
					// enable double quote
					bQuoted = true;
					bHasQuot = true;
				} else if ( bQuoted )
				{
					// close double quote on 2st quote symbol
					bQuoted = false;
				} else if ( bHasQuot && iQuotPrev!=-1 && iQuotPrev+1==iOff )
				{
					// escaped quote found, re-enable double quote and copy symbol itself
					bQuoted = true;
					*d++ = '"';
				} else
				{
					*d++ = *s;
				}

				s++;
				iQuotPrev = iOff;
				continue;
			}

			if ( bQuoted )
			{
				// inside quotes: delimiters/EOLs are plain data
				*d++ = *s++;
				continue;
			}

			// unquoted control symbol: close the current column in place
			int iLen = int ( d - m_dBuf.Begin() - iColumnStart );
			assert ( iLen>=0 );
			if ( iCol<m_dColumnsLen.GetLength() )
				m_dColumnsLen[iCol] = iLen;
			*d++ = '\0';
			s++;
			iCol++;

			if ( bNull ) // null terminated string found
			{
				m_iDataStart = m_iBufUsed = 0;
				break;
			} else if ( bEOL ) // end of document found
			{
				// skip all EOL characters
				while ( s<pEnd && *s && ( *s=='\r' || *s=='\n' ) )
					s++;
				break;
			}

			assert ( bDelimiter );
			// column separator found
			iColumnStart = int ( d - m_dBuf.Begin() );
			bOnlySpace = true;
			bQuoted = false;
			bHasQuot = false;
			iQuotPrev = -1;
			continue;
		}

		/////////////////////
		// read in more data
		/////////////////////

		int iDstOff = int ( s - m_dBuf.Begin() );
		int iSrcOff = int ( d - m_dBuf.Begin() );

		// if there is space at the start, move data around
		// if not, resize the buffer
		if ( m_iDataStart>0 )
		{
			memmove ( m_dBuf.Begin(), m_dBuf.Begin() + m_iDataStart, m_iBufUsed - m_iDataStart );
			m_iBufUsed -= m_iDataStart;
			iDstOff -= m_iDataStart;
			iSrcOff -= m_iDataStart;
			iColumnStart -= m_iDataStart;
			// keep the quote/escape bookmarks consistent with the shifted buffer
			if ( iQuotPrev!=-1 )
				iQuotPrev -= m_iDataStart;
			iEscapeStart -= m_iDataStart;
			m_iDataStart = 0;
			m_iDocStart = 0;
		} else if ( m_iBufUsed==m_dBuf.GetLength() )
		{
			m_dBuf.Resize ( m_dBuf.GetLength()*2 );
		}

		// do read
		auto iGot = (int) fread ( m_dBuf.Begin() + m_iBufUsed, 1, m_dBuf.GetLength() - m_iBufUsed, m_pFP );
		if ( !iGot )
		{
			if ( !iCol )
			{
				// normal file termination - no pending columns and documents
				m_iDataStart = m_iBufUsed = 0;
				m_tDocInfo.m_tRowID = INVALID_ROWID;
				return CSphSource_BaseSV::DATA_OVER;
			}

			if ( iCol!=iColumns )
				sError.SetSprintf ( "source '%s': not all columns found (found=%d, total=%d, line=%d, error='%s')", m_tSchema.GetName(), iCol, iColumns, m_iLine, strerror(errno) );
			else
			{
				// error in case no data left in middle of data stream
				sError.SetSprintf ( "source '%s': read error '%s' (line=%d)", m_tSchema.GetName(), strerror(errno), m_iLine );
			}
			return CSphSource_BaseSV::PARSING_FAILED;
		}
		m_iBufUsed += iGot;

		// restore pointers because of the resize
		s = m_dBuf.Begin() + iDstOff;
		d = m_dBuf.Begin() + iSrcOff;
		pEnd = m_dBuf.Begin() + m_iBufUsed;

		// skip all EOL characters left from previous row
		if ( !iCol )
		{
			while ( s<pEnd && *s && ( *s=='\r' || *s=='\n' ) )
				s++;
		}
	}

	// all columns presence check
	if ( iCol!=iColumns )
	{
		sError.SetSprintf ( "source '%s': not all columns found (found=%d, total=%d, line=%d)", m_tSchema.GetName(), iCol, iColumns, m_iLine );
		return CSphSource_BaseSV::PARSING_FAILED;
	}

	// tail data
	assert ( s<=pEnd );
	m_iDataStart = int ( s - m_dBuf.Begin() );
	return CSphSource_BaseSV::GOT_DOCUMENT;
}
/// build the schema from csvpipe_* config directives; attrs first, then fields
/// (note that csvpipe_field_string registers the column both as attr and field)
bool CSphSource_CSV::SetupSchema ( const CSphConfigSection & hSource, bool bWordDict, CSphString & sError )
{
	bool bOk = true;

	bOk &= ConfigureAttrs ( hSource("csvpipe_attr_uint"), SPH_ATTR_INTEGER, m_tSchema, sError );
	bOk &= ConfigureAttrs ( hSource("csvpipe_attr_timestamp"), SPH_ATTR_TIMESTAMP, m_tSchema, sError );
	bOk &= ConfigureAttrs ( hSource("csvpipe_attr_bool"), SPH_ATTR_BOOL, m_tSchema, sError );
	bOk &= ConfigureAttrs ( hSource("csvpipe_attr_float"), SPH_ATTR_FLOAT, m_tSchema, sError );
	bOk &= ConfigureAttrs ( hSource("csvpipe_attr_bigint"), SPH_ATTR_BIGINT, m_tSchema, sError );
	bOk &= ConfigureAttrs ( hSource("csvpipe_attr_multi"), SPH_ATTR_UINT32SET, m_tSchema, sError );
	bOk &= ConfigureAttrs ( hSource("csvpipe_attr_multi_64"), SPH_ATTR_INT64SET, m_tSchema, sError );
	bOk &= ConfigureAttrs ( hSource("csvpipe_attr_string"), SPH_ATTR_STRING, m_tSchema, sError );
	bOk &= ConfigureAttrs ( hSource("csvpipe_attr_json"), SPH_ATTR_JSON, m_tSchema, sError );
	bOk &= ConfigureAttrs ( hSource("csvpipe_field_string"), SPH_ATTR_STRING, m_tSchema, sError );

	if ( !bOk )
		return false;

	ConfigureFields ( hSource("csvpipe_field"), bWordDict, m_tSchema );
	ConfigureFields ( hSource("csvpipe_field_string"), bWordDict, m_tSchema );

	return true;
}
/// override the column separator; only the first character of the configured
/// string is used, and an empty/null input keeps the default (',')
void CSphSource_CSV::SetDelimiter ( const char * sDelimiter )
{
	if ( sDelimiter && *sDelimiter )
		m_iDelimiter = *sDelimiter;
}
| 22,509
|
C++
|
.cpp
| 673
| 30.365527
| 168
| 0.662194
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,973
|
source_sql.cpp
|
manticoresoftware_manticoresearch/src/indexing_sources/source_sql.cpp
|
//
// Copyright (c) 2021-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "source_sql.h"
#include "attribute.h"
#include "sphinxint.h"
#include "conversion.h"
#if WITH_ZLIB
#include <zlib.h>
#endif
#define SPH_UNPACK_BUFFER_SIZE 4096
#include <ctime>
/// expand every occurrence of sMacro in sTemplate with the decimal value uValue;
/// returns a new[]-allocated string which the caller must delete[]
static char * sphStrMacro ( const char * sTemplate, const char * sMacro, uint64_t uValue )
{
	// expand macro
	char sExp[32];
	snprintf ( sExp, sizeof(sExp), UINT64_FMT, uValue );

	// calc lengths
	auto iExp = (int) strlen ( sExp );
	auto iMacro = (int) strlen ( sMacro );
	int iDelta = iExp-iMacro;
	assert ( iMacro>0 ); // an empty macro would make the scan loops below spin forever

	// calc result length
	auto iRes = (int) strlen ( sTemplate );
	const char * sCur = sTemplate;
	while ( ( sCur = strstr ( sCur, sMacro ) )!=NULL )
	{
		iRes += iDelta;
		// advance past the whole match, exactly like the replacement pass below;
		// advancing by a single char here could count overlapping matches that the
		// replacement pass never performs, oversizing the buffer and tripping the
		// final length assert
		sCur += iMacro;
	}

	// build result
	char * sRes = new char [ iRes+1 ];
	char * sOut = sRes;
	const char * sLast = sTemplate;
	sCur = sTemplate;

	while ( ( sCur = strstr ( sCur, sMacro ) )!=NULL )
	{
		strncpy ( sOut, sLast, sCur-sLast ); sOut += sCur-sLast;
		strcpy ( sOut, sExp ); sOut += iExp; // NOLINT
		sCur += iMacro;
		sLast = sCur;
	}

	if ( *sLast )
		strcpy ( sOut, sLast ); // NOLINT

	assert ( (int)strlen(sRes)==iRes );
	return sRes;
}
/////////////////////////////////////////////////////////////////////////////
// GENERIC SQL SOURCE
/////////////////////////////////////////////////////////////////////////////
// textual placeholders substituted into ranged queries; order matches the MACRO_* indexes
const char * const CSphSource_SQL::MACRO_VALUES [ CSphSource_SQL::MACRO_COUNT ] =
{
	"$start",
	"$end"
};
CSphSource_SQL::CSphSource_SQL ( const char * sName )
	: CSphSource ( sName )
{
}
/// store the generic SQL source params, normalizing defaults and building the DSN
bool CSphSource_SQL::SetupSQL ( const CSphSourceParams_SQL & tParams )
{
	// checks
	assert ( !tParams.m_sQuery.IsEmpty() );

	m_tParams = tParams;

	// defaults
	// replace null strings with empty ones so .cstr() is always safe below
#define LOC_FIX_NULL(_arg) if ( !m_tParams._arg.cstr() ) m_tParams._arg = "";
	LOC_FIX_NULL ( m_sHost );
	LOC_FIX_NULL ( m_sUser );
	LOC_FIX_NULL ( m_sPass );
	LOC_FIX_NULL ( m_sDB );
#undef LOC_FIX_NULL

	// drop empty entries from the pre/post query lists
#define LOC_FIX_QARRAY(_arg) \
	ARRAY_FOREACH ( i, m_tParams._arg ) \
		if ( m_tParams._arg[i].IsEmpty() ) \
			m_tParams._arg.Remove ( i-- );
	LOC_FIX_QARRAY ( m_dQueryPre );
	LOC_FIX_QARRAY ( m_dQueryPost );
	LOC_FIX_QARRAY ( m_dQueryPostIndex );
#undef LOC_FIX_QARRAY

	// build and store default DSN for error reporting
	// (the password is masked; subclasses may rewrite the "sql" scheme prefix)
	m_sSqlDSN.SetSprintf ( "sql://%s:***@%s:%d/%s",
		m_tParams.m_sUser.cstr(), m_tParams.m_sHost.cstr(),
		m_tParams.m_uPort, m_tParams.m_sDB.cstr() );

	if ( m_tParams.m_iMaxFileBufferSize > 0 )
		m_iMaxFileBufferSize = m_tParams.m_iMaxFileBufferSize;
	m_eOnFileFieldError = m_tParams.m_eOnFileFieldError;

	return true;
}
/// interpolate macro values into a query template: each occurrence of
/// dMacroses[i] (all of which start with '$') is replaced by dValues[i];
/// returns a new[]-allocated string which the caller must delete[]
static const char * SubstituteParams ( const char * sQuery, const char * const * dMacroses, const char ** dValues, int iMcount )
{
	// OPTIMIZE? things can be precalculated
	// returns the index of the macro matching at sPos, or -1 if none does;
	// every macro begins with '$', so anything else short-circuits
	auto FindMacro = [&] ( const char * sPos ) -> int
	{
		if ( *sPos!='$' )
			return -1;
		for ( int i=0; i<iMcount; i++ )
			if ( strncmp ( dMacroses[i], sPos, strlen ( dMacroses[i] ) )==0 )
				return i;
		return -1;
	};

	// pass 1: measure the expanded length (incl. trailing zero)
	size_t uResLen = 1;
	for ( const char * p = sQuery; *p; )
	{
		int iMacro = FindMacro ( p );
		if ( iMacro>=0 )
		{
			p += strlen ( dMacroses[iMacro] );
			uResLen += strlen ( dValues[iMacro] );
		} else
		{
			p++;
			uResLen++;
		}
	}

	// pass 2: build the interpolated copy
	auto * sRes = new char [ uResLen ];
	char * pOut = sRes;
	for ( const char * p = sQuery; *p; )
	{
		int iMacro = FindMacro ( p );
		if ( iMacro>=0 )
		{
			size_t uValLen = strlen ( dValues[iMacro] );
			memcpy ( pOut, dValues[iMacro], uValLen );
			pOut += uValLen;
			p += strlen ( dMacroses[iMacro] );
		} else
			*pOut++ = *p++;
	}
	*pOut++ = '\0';

	assert ( pOut-sRes==(ptrdiff_t)uResLen );
	return sRes;
}
/// run the next step of a ranged query: substitute $start/$end with the current
/// id window, advance the window, and issue the query
/// returns false (with empty sError) when ranging is off or the range is exhausted
bool CSphSource_SQL::RunQueryStep ( const char * sQuery, CSphString & sError )
{
	sError = "";

	if ( m_tParams.m_iRangeStep<=0 )
		return false;
	if ( m_tCurrentID>m_tMaxID )
		return false;

	static const int iBufSize = 32;
	const char * sRes = nullptr;

	// optional throttling between range steps
	sphSleepMsec ( m_tParams.m_iRangedThrottleMs );

	//////////////////////////////////////////////
	// range query with $start/$end interpolation
	//////////////////////////////////////////////

	assert ( m_tMinID>0 );
	assert ( m_tMaxID>0 );
	assert ( m_tMinID<=m_tMaxID );
	assert ( sQuery );

	char sValues [ MACRO_COUNT ] [ iBufSize ];
	const char * pValues [ MACRO_COUNT ];
	// window is [m_tCurrentID, tNextID], both inclusive, clamped to m_tMaxID
	DocID_t tNextID = Min ( m_tCurrentID + (DocID_t)m_tParams.m_iRangeStep - 1, m_tMaxID );
	snprintf ( sValues[0], iBufSize, INT64_FMT, m_tCurrentID );
	snprintf ( sValues[1], iBufSize, INT64_FMT, tNextID );
	pValues[0] = sValues[0];
	pValues[1] = sValues[1];
	// export the current window for progress reporting
	g_iIndexerCurrentRangeMin = m_tCurrentID;
	g_iIndexerCurrentRangeMax = tNextID;
	m_tCurrentID = 1 + tNextID;

	sRes = SubstituteParams ( sQuery, MACRO_VALUES, pValues, MACRO_COUNT );

	// run query
	SqlDismissResult ();
	bool bRes = SqlQuery ( sRes );
	if ( !bRes )
		sError.SetSprintf ( "sql_query_range: %s (DSN=%s)", SqlError(), m_sSqlDSN.cstr() );
	SafeDeleteArray ( sRes );

	return bRes;
}
/// run the user's hook_connect command; only failure to spawn the process is
/// reported as an error (the command's exit status is ignored)
static bool HookConnect ( const char* szCommand )
{
	FILE * pPipe = popen ( szCommand, "r" );
	if ( !pPipe )
		return false;
	pclose ( pPipe );
	return true;
}
/// advance past any leading whitespace, never moving beyond pBufEnd;
/// returns the first non-space position (or pBufEnd)
inline static const char* skipspace ( const char* pBuf, const char* pBufEnd )
{
	assert ( pBuf );
	assert ( pBufEnd );
	for ( ; pBuf<pBufEnd && isspace ( *pBuf ); ++pBuf )
		;
	return pBuf;
}
/// parse an unsigned decimal run starting at pBuf (bounded by pBufEnd) into *pRes;
/// *pRes is left untouched when the buffer is already exhausted;
/// returns the first position past the parsed digits (pBuf itself if none)
inline static const char* scannumber ( const char* pBuf, const char* pBufEnd, DocID_t * pRes )
{
	assert ( pBuf );
	assert ( pBufEnd );
	assert ( pRes );
	if ( pBuf<pBufEnd )
	{
		*pRes = 0;
		// FIXME! could check for overflow
		// bounds check must come first: the previous order ( isdigit(*pBuf) && pBuf<pBufEnd )
		// dereferenced one byte past the end of the buffer when digits ran up to pBufEnd
		while ( pBuf<pBufEnd && isdigit ( *pBuf ) )
			(*pRes) = 10*(*pRes) + (int)( (*pBuf++)-'0' );
	}
	return pBuf;
}
/// run the user's ranged-query hook and parse its stdout as two whitespace-separated
/// numbers, "min max"; returns false only if the process could not be spawned
/// NOTE(review): *pMin/*pMax are left untouched when the command prints nothing -
/// callers presumably pre-initialize them; confirm
static bool HookQueryRange ( const char* szCommand, DocID_t* pMin, DocID_t* pMax )
{
	FILE * pPipe = popen ( szCommand, "r" );
	if ( !pPipe )
		return false;

	const int MAX_BUF_SIZE = 1024;
	char dBuf [MAX_BUF_SIZE];
	auto iRead = (int)fread ( dBuf, 1, MAX_BUF_SIZE, pPipe );
	pclose ( pPipe );

	const char* pStart = dBuf;
	const char* pEnd = pStart + iRead;
	// leading whitespace and 1-st number
	pStart = skipspace ( pStart, pEnd );
	pStart = scannumber ( pStart, pEnd, pMin );
	// whitespace and 2-nd number
	pStart = skipspace ( pStart, pEnd );
	scannumber ( pStart, pEnd, pMax );
	return true;
}
/// Run the external post-index hook command with the "$maxid" macro replaced
/// by the biggest document id fetched during indexing.
/// Returns false only when the command could not be launched; its exit status is ignored.
static bool HookPostIndex ( const char* szCommand, DocID_t tLastIndexed )
{
	const char * sMacro = "$maxid";
	char sValue[32];
	const char* pValue = sValue;
	// NOTE(review): DocID_t is formatted with UINT64_FMT here — presumably ids are non-negative; confirm
	snprintf ( sValue, sizeof(sValue), UINT64_FMT, tLastIndexed );
	// SubstituteParams() returns a freshly allocated command line; released below
	const char * pCmd = SubstituteParams ( szCommand, &sMacro, &pValue, 1 );
	FILE * pPipe = popen ( pCmd, "r" );
	SafeDeleteArray ( pCmd );
	if ( !pPipe )
		return false;
	pclose ( pPipe );
	return true;
}
/// Connect to the SQL server (idempotent: a second call is a no-op).
/// On success also reserves the hits buffer and fires the optional
/// external connect hook; a failing hook fails the whole connect.
bool CSphSource_SQL::Connect ( CSphString & sError )
{
	// already connected? nothing to do
	if ( m_bSqlConnected )
		return true;

	if ( !SqlConnect() )
	{
		sError.SetSprintf ( "sql_connect: %s (DSN=%s)", SqlError(), m_sSqlDSN.cstr() );
		return false;
	}

	m_tHits.Reserve ( m_iMaxHits );
	m_bSqlConnected = true;

	// run the external connect hook, if one was configured
	bool bHookOk = m_tParams.m_sHookConnect.IsEmpty() || HookConnect ( m_tParams.m_sHookConnect.cstr() );
	if ( !bHookOk )
		sError.SetSprintf ( "hook_connect: runtime error %s when running external hook", strerrorm(errno) );
	return bHookOk;
}
/// Write sLine to fp as a single-quoted SQL-ish string, prefixing tabs,
/// single quotes and backslashes with a backslash. NULL/empty prints ''.
static void FormatEscaped ( FILE * fp, const char * sLine )
{
	// handle empty lines
	if ( !sLine || !*sLine )
	{
		fprintf ( fp, "''" );
		return;
	}

	auto IsSpecial = [] ( char c ) { return c=='\t' || c=='\'' || c=='\\'; };

	// pass one: compute the output size (each special char gains one backslash)
	auto iLen = (int) strlen(sLine);
	int iOut = iLen + 2; // the surrounding quotes
	for ( int i=0; i<iLen; i++ )
		if ( IsSpecial ( sLine[i] ) )
			iOut++;

	// use the stack buffer when the result fits, a temporary heap one otherwise
	char sStack[8192];
	char * sHeap = nullptr;
	char * sBuffer = sStack;
	if ( iOut>(int)sizeof(sStack) )
	{
		sHeap = new char [ iOut+4 ]; // small safety gap, kept from the legacy code
		sBuffer = sHeap;
	}

	// pass two: emit the quoted, escaped payload
	char * sOut = sBuffer;
	*sOut++ = '\'';
	for ( int i=0; i<iLen; i++ )
	{
		if ( IsSpecial ( sLine[i] ) )
			*sOut++ = '\\';
		*sOut++ = sLine[i];
	}
	*sOut++ = '\'';

	assert ( sOut==sBuffer+iOut );
	fwrite ( sBuffer, 1, iOut, fp );

	delete [] sHeap;
}
#define LOC_ERROR(_msg,_arg) { sError.SetSprintf ( _msg, _arg ); return false; }
#define LOC_ERROR2(_msg,_arg,_arg2) { sError.SetSprintf ( _msg, _arg, _arg2 ); return false; }
/// Setup the [m_tMinID, m_tMaxID] range by running the given range-query
/// (called both for document range-queries and MVA range-queries).
/// sQuery must contain the $start/$end macros; sPrefix prefixes error messages.
/// For SRE_DOCS the optional external range hook may override the range.
bool CSphSource_SQL::SetupRanges ( const char * sRangeQuery, const char * sQuery, const char * sPrefix, CSphString & sError, ERangesReason iReason )
{
	// check step
	if ( m_tParams.m_iRangeStep<=0 )
		LOC_ERROR ( "sql_range_step=" INT64_FMT ": must be non-zero positive", m_tParams.m_iRangeStep );

	if ( m_tParams.m_iRangeStep<128 )
		sphWarn ( "sql_range_step=" INT64_FMT ": too small; might hurt indexing performance!", m_tParams.m_iRangeStep );

	// check query for macros: a ranged fetch query without $start/$end would loop forever
	for ( const char* sMacro : MACRO_VALUES )
		if ( !strstr ( sQuery, sMacro ) )
			LOC_ERROR2 ( "%s: macro '%s' not found in match fetch query", sPrefix, sMacro );

	// run query
	if ( !SqlQuery ( sRangeQuery ) )
	{
		sError.SetSprintf ( "%s: range-query failed: %s (DSN=%s)", sPrefix, SqlError(), m_sSqlDSN.cstr() );
		return false;
	}

	// fetch min/max
	int iCols = SqlNumFields ();
	if ( iCols!=2 )
		LOC_ERROR2 ( "%s: expected 2 columns (min_id/max_id), got %d", sPrefix, iCols );

	if ( !SqlFetchRow() )
	{
		sError.SetSprintf ( "%s: range-query fetch failed: %s (DSN=%s)", sPrefix, SqlError(), m_sSqlDSN.cstr() );
		return false;
	}

	if ( ( SqlColumn(0)==NULL || !SqlColumn(0)[0] ) && ( SqlColumn(1)==NULL || !SqlColumn(1)[0] ) )
	{
		// the source seems to be empty; workaround
		m_tMinID = 1;
		m_tMaxID = 1;
	} else
	{
		// get and check min/max id
		const char * sCol0 = SqlColumn(0);
		const char * sCol1 = SqlColumn(1);
		m_tMinID = sphToInt64 ( sCol0 );
		m_tMaxID = sphToInt64 ( sCol1 );
		if ( !sCol0 ) sCol0 = "(null)";
		if ( !sCol1 ) sCol1 = "(null)";
		if ( m_tMinID>m_tMaxID )
			LOC_ERROR2 ( "sql_query_range: min_id='%s', max_id='%s': min_id must be less than max_id", sCol0, sCol1 );
	}

	SqlDismissResult ();

	// the external hook may override the range, but only for the main document pass
	if ( iReason==SRE_DOCS && ( !m_tParams.m_sHookQueryRange.IsEmpty() ) )
	{
		// use strerrorm for consistency with the other hook error reports in this file
		if ( !HookQueryRange ( m_tParams.m_sHookQueryRange.cstr(), &m_tMinID, &m_tMaxID ) )
			LOC_ERROR ( "hook_query_range: runtime error %s when running external hook", strerrorm(errno) );
		if ( m_tMinID>m_tMaxID )
			LOC_ERROR2 ( "hook_query_range: min_id=" INT64_FMT ", max_id=" INT64_FMT ": min_id must be less than max_id", m_tMinID, m_tMaxID );
	}

	return true;
}
/// Issue the main rows fetch query and build the source schema from its result set.
/// Runs sql_query_pre[_all] statements, sets up ranged fetching when configured,
/// then maps every SQL column to either a fulltext field or an attribute
/// (column 0 is always the document id), appends joined fields and auto-attrs.
bool CSphSource_SQL::IterateStart ( CSphString & sError )
{
	assert ( m_bSqlConnected );

	if ( !QueryPreAll ( sError ) )
		return false;

	// run pre-queries
	ARRAY_FOREACH ( i, m_tParams.m_dQueryPre )
	{
		if ( !SqlQuery ( m_tParams.m_dQueryPre[i].cstr() ) )
		{
			sError.SetSprintf ( "sql_query_pre[%d]: %s (DSN=%s)", i, SqlError(), m_sSqlDSN.cstr() );
			SqlDisconnect ();
			return false;
		}
		SqlDismissResult ();
	}

	// single-pass loop, used only so error paths can 'return' and success can 'break'
	while (true)
	{
		m_tParams.m_iRangeStep = 0;

		// issue first fetch query
		if ( !m_tParams.m_sQueryRange.IsEmpty() )
		{
			m_tParams.m_iRangeStep = m_tParams.m_iRefRangeStep;
			// run range-query; setup ranges
			if ( !SetupRanges ( m_tParams.m_sQueryRange.cstr(), m_tParams.m_sQuery.cstr(), "sql_query_range: ", sError, SRE_DOCS ) )
				return false;
			// issue query
			m_tCurrentID = m_tMinID;
			if ( !RunQueryStep ( m_tParams.m_sQuery.cstr(), sError ) )
				return false;
		} else
		{
			// normal query; just issue
			if ( !SqlQuery ( m_tParams.m_sQuery.cstr() ) )
			{
				sError.SetSprintf ( "sql_query: %s (DSN=%s)", SqlError(), m_sSqlDSN.cstr() );
				return false;
			}
		}
		break;
	}

	// some post-query setup
	m_tSchema.Reset();

	for (auto & i : m_dUnpack)
		i = SPH_UNPACK_NONE;

	m_iSqlFields = SqlNumFields(); // for rowdump

	// track which configured attributes actually appear in the result set
	CSphVector<bool> dFound;
	dFound.Resize ( m_tParams.m_dAttrs.GetLength() );
	ARRAY_FOREACH ( i, dFound )
		dFound[i] = false;

	const bool bWordDict = m_pDict->GetSettings().m_bWordDict;

	// map plain attrs from SQL
	for ( int i=0; i<m_iSqlFields; i++ )
	{
		const char * sName = SqlFieldName(i);
		if ( !sName )
			LOC_ERROR ( "column number %d has no name", i+1 );

		CSphColumnInfo tCol ( sName );
		// match this column against the configured attribute list (case-insensitive)
		ARRAY_FOREACH ( j, m_tParams.m_dAttrs )
		{
			const CSphColumnInfo & tAttr = m_tParams.m_dAttrs[j];
			if ( !strcasecmp ( tCol.m_sName.cstr(), tAttr.m_sName.cstr() ) )
			{
				tCol.m_eAttrType = tAttr.m_eAttrType;
				assert ( tCol.m_eAttrType!=SPH_ATTR_NONE );

				// MVAs fetched from a column must be declared with 'field' source
				if ( ( tAttr.m_eAttrType==SPH_ATTR_UINT32SET || tAttr.m_eAttrType==SPH_ATTR_INT64SET ) && tAttr.m_eSrc!=SPH_ATTRSRC_FIELD )
					LOC_ERROR ( "multi-valued attribute '%s' of wrong source-type found in query; must be 'field'", tAttr.m_sName.cstr() );

				tCol = tAttr;
				dFound[j] = true;
				break;
			}

			if ( !strcasecmp ( sphGetDocidName(), tAttr.m_sName.cstr() ) )
				LOC_ERROR ( "can not redefine auto-defined '%s' attribute", tAttr.m_sName.cstr() );
		}

		for ( auto & tJoined : m_tParams.m_dJoinedFields )
			if ( tJoined.m_sName==sName )
				LOC_ERROR ( "joined field '%s' has the same name as a fulltext field", sName );

		if ( !i )
		{
			// id column coming from sql may have another name
			tCol.m_sName = sphGetDocidName();
			tCol.m_eAttrType = SPH_ATTR_BIGINT;
		}

		ARRAY_FOREACH ( j, m_tParams.m_dFileFields )
		{
			if ( !strcasecmp ( tCol.m_sName.cstr(), m_tParams.m_dFileFields[j].cstr() ) )
				tCol.m_bFilename = true;
		}

		tCol.m_iIndex = i;
		tCol.m_eWordpart = GetWordpart ( tCol.m_sName.cstr(), bWordDict );

		// unmatched columns (SPH_ATTR_NONE) become fulltext fields
		if ( tCol.m_eAttrType==SPH_ATTR_NONE || tCol.m_bIndexed )
		{
			if ( m_tSchema.GetField ( tCol.m_sName.cstr() ) )
				LOC_ERROR ( "field '%s' is added twice", tCol.m_sName.cstr() );

			m_tSchema.AddField ( tCol );

			// wire up per-field unpacking, if configured for this column
			ARRAY_FOREACH ( k, m_tParams.m_dUnpack )
			{
				CSphUnpackInfo & tUnpack = m_tParams.m_dUnpack[k];
				if ( tUnpack.m_sName==tCol.m_sName )
				{
					if ( !m_bCanUnpack )
					{
						sError.SetSprintf ( "this source does not support column unpacking" );
						return false;
					}
					int iIndex = m_tSchema.GetFieldsCount() - 1;
					if ( iIndex < SPH_MAX_FIELDS )
					{
						m_dUnpack[iIndex] = tUnpack.m_eFormat;
						m_dUnpackBuffers[iIndex].Resize ( SPH_UNPACK_BUFFER_SIZE );
					}
					break;
				}
			}
		}

		if ( tCol.m_eAttrType!=SPH_ATTR_NONE )
		{
			if ( CSphSchema::IsReserved ( tCol.m_sName.cstr() ) )
				LOC_ERROR ( "%s is not a valid attribute name", tCol.m_sName.cstr() );
			if ( m_tSchema.GetAttr ( tCol.m_sName.cstr() ) )
				LOC_ERROR ( "attribute '%s' is added twice", tCol.m_sName.cstr() );
			m_tSchema.AddAttr ( tCol, true ); // all attributes are dynamic at indexing time
		}
	}

	// map multi-valued attrs (query/ranged-query sourced MVAs have no SQL column of their own)
	ARRAY_FOREACH ( i, m_tParams.m_dAttrs )
	{
		const CSphColumnInfo & tAttr = m_tParams.m_dAttrs[i];
		if ( ( tAttr.m_eAttrType==SPH_ATTR_UINT32SET || tAttr.m_eAttrType==SPH_ATTR_INT64SET ) && tAttr.m_eSrc!=SPH_ATTRSRC_FIELD )
		{
			CSphColumnInfo tMva;
			tMva = tAttr;
			tMva.m_iIndex = m_tSchema.GetAttrsCount();
			if ( CSphSchema::IsReserved ( tMva.m_sName.cstr() ) )
				LOC_ERROR ( "%s is not a valid attribute name", tMva.m_sName.cstr() );
			m_tSchema.AddAttr ( tMva, true ); // all attributes are dynamic at indexing time
			dFound[i] = true;
		}
	}

	// warn if some attrs went unmapped
	ARRAY_FOREACH ( i, dFound )
		if ( !dFound[i] )
			sphWarn ( "attribute '%s' not found - IGNORING", m_tParams.m_dAttrs[i].m_sName.cstr() );

	// joined fields
	m_iPlainFieldsLength = m_tSchema.GetFieldsCount();

	for ( const auto & tJoinedField: m_tParams.m_dJoinedFields )
	{
		CSphColumnInfo tCol;
		tCol.m_iIndex = -1;
		tCol.m_sName = tJoinedField.m_sName;
		tCol.m_sQuery = tJoinedField.m_sQuery;
		tCol.m_bPayload = tJoinedField.m_bPayload;
		tCol.m_sQueryRange = tJoinedField.m_sRanged;
		tCol.m_eWordpart = GetWordpart ( tCol.m_sName.cstr(), bWordDict );
		tCol.m_eSrc = ( !tJoinedField.m_bRangedMain ? SPH_ATTRSRC_QUERY : SPH_ATTRSRC_RANGEDMAINQUERY );
		if ( !tJoinedField.m_sRanged.IsEmpty() )
			tCol.m_eSrc = SPH_ATTRSRC_RANGEDQUERY;
		m_tSchema.AddField ( tCol );
	}

	// auto-computed length attributes
	if ( !AddAutoAttrs ( sError ) )
		return false;

	StrVec_t dWarnings;
	m_tSchema.SetupFlags ( *this, false, &dWarnings );
	for ( const auto & i : dWarnings )
		sphWarn ( "%s", i.cstr() );

	// check it
	if ( m_tSchema.GetFieldsCount()>SPH_MAX_FIELDS )
		LOC_ERROR2 ( "too many fields (fields=%d, max=%d)", m_tSchema.GetFieldsCount(), SPH_MAX_FIELDS );

	// alloc storage
	AllocDocinfo();

	// log it
	DumpRowsHeader();
	return true;
}
/// Execute every sql_query_pre_all statement in order.
/// The first failing statement sets sError, disconnects, and aborts.
bool CSphSource_SQL::QueryPreAll ( CSphString& sError )
{
	ARRAY_FOREACH ( i, m_tParams.m_dQueryPreAll )
	{
		if ( SqlQuery ( m_tParams.m_dQueryPreAll[i].cstr() ) )
		{
			SqlDismissResult();
			continue;
		}
		sError.SetSprintf ( "sql_query_pre_all[%d]: %s (DSN=%s)", i, SqlError(), m_sSqlDSN.cstr() );
		SqlDisconnect();
		return false;
	}
	return true;
}
// dump schema to given file or stdout, if rt inserts expected
/// Writes a commented schema description plus a CREATE TABLE statement for the
/// row-dump table; delegates to the SphinxQL variant when rt inserts were requested.
void CSphSource_SQL::DumpRowsHeader ()
{
	if ( m_tParams.m_bPrintRTQueries )
	{
		DumpRowsHeaderSphinxql();
		return;
	}

	if ( !m_fpDumpRows )
		return;

	const char * sTable = m_tSchema.GetName ();

	time_t iNow = time ( nullptr );
	fprintf ( m_fpDumpRows, "#\n# === source %s ts %d\n# %s#\n", sTable, (int) iNow, ctime ( &iNow ));
	for ( int i = 0; i<m_tSchema.GetFieldsCount (); ++i )
		fprintf ( m_fpDumpRows, "# field %d: %s\n", i, m_tSchema.GetFieldName ( i ));

	for ( int i = 0; i<m_tSchema.GetAttrsCount (); i++ )
	{
		const CSphColumnInfo & tCol = m_tSchema.GetAttr ( i );
		fprintf ( m_fpDumpRows, "# %s = %s # attr %d\n", sphTypeDirective ( tCol.m_eAttrType ), tCol.m_sName.cstr (),
			i );
	}

	// column 0 (id) gets its own fixed-width column; the rest are generic VARCHARs
	fprintf ( m_fpDumpRows, "#\n\nDROP TABLE IF EXISTS rows_%s;\nCREATE TABLE rows_%s (\n id VARCHAR(32) NOT NULL,\n",
		sTable, sTable );
	for ( int i = 1; i<m_iSqlFields; ++i )
		fprintf ( m_fpDumpRows, " %s VARCHAR(4096) NOT NULL,\n", SqlFieldName ( i ));
	fprintf ( m_fpDumpRows, " KEY(id) );\n\n" );
}
// dump schema to given file or stdout, if rt inserts expected
/// SphinxQL flavor of the dump header: emits rt_field/attr directives as comments
/// and builds m_dDumpMap — for each dumped value, the SQL column index and a flag
/// saying whether it must be quoted/escaped when printed as an INSERT value.
void CSphSource_SQL::DumpRowsHeaderSphinxql ()
{
	const char * sTable = m_tSchema.GetName ();

	time_t iNow = time ( nullptr );
	m_sCollectDump.Clear();
	m_sCollectDump.Sprintf ( "#\n# === source %s ts %d\n# %s#\n", sTable, (int) iNow, ctime ( &iNow ));

	// map SQL column names to their positions for schema-to-resultset matching
	SmallStringHash_T<int> hSqlSchema;
	for ( int i = 0; i<m_iSqlFields; ++i )
		hSqlSchema.Add ( i, SqlFieldName ( i ));

	m_dDumpMap.Reset();
	// id always comes first; fall back to column 0 if no column is literally named "id"
	m_dDumpMap.Add ( {hSqlSchema.Exists("id") ? hSqlSchema["id"] : 0, false} );

	for ( int i = 0; i<m_tSchema.GetFieldsCount (); ++i )
	{
		if ( hSqlSchema.Exists ( m_tSchema.GetFieldName ( i ) ))
			m_dDumpMap.Add ( {hSqlSchema[m_tSchema.GetFieldName ( i )], true} );
		m_sCollectDump.Sprintf ( "#\trt_field = %s # field %d\n", m_tSchema.GetFieldName ( i ), i );
	}

	auto * sBlobLocator = sphGetBlobLocatorName ();
	auto * sIdLocator = sphGetDocidName ();
	for ( int i = 0; i<m_tSchema.GetAttrsCount (); ++i )
	{
		const CSphColumnInfo & tCol = m_tSchema.GetAttr ( i );
		// internal locator attributes are not real user data; skip them
		if ( tCol.m_sName!=sBlobLocator && tCol.m_sName!=sIdLocator )
		{
			if ( hSqlSchema.Exists ( tCol.m_sName ))
				m_dDumpMap.Add ( {hSqlSchema[tCol.m_sName],
					tCol.m_eAttrType==SPH_ATTR_STRING || tCol.m_eAttrType==SPH_ATTR_STRINGPTR} );
			auto sTypeName = sphRtTypeDirective ( tCol.m_eAttrType );
			if ( sTypeName )
				m_sCollectDump.Sprintf ( "#\t%s = %s # attr %d\n", sTypeName, tCol.m_sName.cstr (), i );
		}
	}
	m_sCollectDump << "#\n\n";

	// flush the header immediately (to the dump file, or stdout by default)
	auto fpDump = m_fpDumpRows;
	if ( !fpDump )
		fpDump = stdout;
	fprintf ( fpDump, "%s", m_sCollectDump.cstr() );
	m_sCollectDump.Clear();
}
#undef LOC_ERROR
#undef LOC_ERROR2
#undef LOC_SQL_ERROR
/// Release per-source buffers and close the SQL connection (safe to call when
/// already disconnected).
void CSphSource_SQL::Disconnect ()
{
	SafeDeleteArray ( m_pReadFileBuffer );
	m_tHits.Reset();

	if ( m_bSqlConnected )
		SqlDisconnect ();
	m_bSqlConnected = false;
}
/// Convert and store attribute iAttr of the currently fetched row into
/// m_dStrAttrs / m_dAttrs / m_tDocInfo, depending on its type.
/// Returns false only when the document id (attr 0) fails to parse,
/// which makes the caller skip the whole row.
bool CSphSource_SQL::StoreAttribute ( int iAttr )
{
	const CSphColumnInfo & tAttr = m_tSchema.GetAttr(iAttr);

	switch ( tAttr.m_eAttrType )
	{
	case SPH_ATTR_STRING:
	case SPH_ATTR_JSON:
		// memorize string, fixup NULLs
		m_dStrAttrs[iAttr] = SqlColumn ( tAttr.m_iIndex );
		if ( !m_dStrAttrs[iAttr].cstr() )
			m_dStrAttrs[iAttr] = "";
		break;

	case SPH_ATTR_FLOAT:
		{
			float fValue = sphToFloat ( SqlColumn ( tAttr.m_iIndex ) ); // FIXME? report conversion errors maybe?
			m_dAttrs[iAttr] = sphF2DW(fValue);
			// columnar attrs live only in m_dAttrs; row-wise ones also go into the docinfo
			if ( !tAttr.IsColumnar() )
				m_tDocInfo.SetAttrFloat ( tAttr.m_tLocator, fValue );
		}
		break;

	case SPH_ATTR_BIGINT:
		if ( tAttr.m_iIndex<0 )
		{
			// the blob locator is an internal attribute with no SQL column behind it
			assert ( tAttr.m_sName==sphGetBlobLocatorName() );
		} else
		{
			// attr 0 is always the document id and gets stricter parsing
			bool bDocId = !iAttr;
			const char * szNumber = SqlColumn ( tAttr.m_iIndex );
			CSphString sWarn;
			if ( bDocId )
			{
				uint64_t uDocID = StrToDocID ( szNumber, sWarn );
				if ( !sWarn.IsEmpty() )
				{
					// bad docid: warn and signal the caller to skip this row
					sphWarn ( "%s", sWarn.cstr() );
					return false;
				}

				m_dAttrs[iAttr] = (int64_t)uDocID;
				// track the max fetched id for $maxid substitution in post-index hooks
				m_tMaxFetchedID = (int64_t)Max ( (uint64_t)m_tMaxFetchedID, uDocID );
			}
			else
				m_dAttrs[iAttr] = sphToInt64 ( szNumber, &sWarn );

			if ( !sWarn.IsEmpty() )
				sphWarn ( "%s", sWarn.cstr() );

			if ( !tAttr.IsColumnar() )
				m_tDocInfo.SetAttr ( tAttr.m_tLocator, m_dAttrs[iAttr] );
		}
		break;

	case SPH_ATTR_TOKENCOUNT:
		// reset, and the value will be filled by IterateHits()
		m_tDocInfo.SetAttr ( tAttr.m_tLocator, 0 );
		break;

	case SPH_ATTR_UINT32SET:
	case SPH_ATTR_INT64SET:
		// field-sourced MVAs are parsed from the column text; query-sourced ones
		// are filled by the separate multivalued iteration pass
		if ( tAttr.m_eSrc==SPH_ATTRSRC_FIELD )
			ParseFieldMVA ( iAttr, SqlColumn ( tAttr.m_iIndex ) );
		break;

	case SPH_ATTR_BOOL:
		m_dAttrs[iAttr] = sphToDword ( SqlColumn ( tAttr.m_iIndex ) ) ? 1 : 0;
		if ( !tAttr.IsColumnar() )
			m_tDocInfo.SetAttr ( tAttr.m_tLocator, m_dAttrs[iAttr] ); // FIXME? report conversion errors maybe?
		break;

	default:
		// just store as uint by default
		m_dAttrs[iAttr] = sphToDword ( SqlColumn ( tAttr.m_iIndex ) ); // FIXME? report conversion errors maybe?
		if ( !tAttr.IsColumnar() )
			m_tDocInfo.SetAttr ( tAttr.m_tLocator, m_dAttrs[iAttr] ); // FIXME? report conversion errors maybe?
		break;
	}

	return true;
}
/// Fetch the next row of the main query and split it into fulltext fields and attributes.
/// Returns the field pointer array on success; nullptr with bEOF=true on clean end of
/// data (after running sql_query_post), or nullptr with sError set on failure.
/// Rows whose document id fails to parse are skipped and the next row is tried.
BYTE ** CSphSource_SQL::NextDocument ( bool & bEOF, CSphString & sError )
{
	assert ( m_bSqlConnected );

	bool bSkipDoc = false;
	do
	{
		// try to get next row
		bool bGotRow = SqlFetchRow ();
		bEOF = false;

		// when the party's over...
		while ( !bGotRow )
		{
			// is that an error?
			if ( SqlIsError() )
			{
				sError.SetSprintf ( "sql_fetch_row: %s", SqlError() );
				return nullptr;
			}

			// maybe we can do next step yet?
			if ( !RunQueryStep ( m_tParams.m_sQuery.cstr(), sError ) )
			{
				// if there's a message, there's an error
				// otherwise, we're just over
				if ( !sError.IsEmpty() )
					return nullptr;
			} else
			{
				// step went fine; try to fetch
				bGotRow = SqlFetchRow ();
				continue;
			}

			SqlDismissResult();

			// ok, we're over; post-queries only warn, they never fail the indexing pass
			ARRAY_FOREACH ( i, m_tParams.m_dQueryPost )
			{
				if ( !SqlQuery ( m_tParams.m_dQueryPost[i].cstr() ) )
				{
					sphWarn ( "sql_query_post[%d]: error=%s, query=%s", i, SqlError(), m_tParams.m_dQueryPost[i].cstr() );
					break;
				}
				SqlDismissResult();
			}

			bEOF = true;
			return nullptr;
		}

		// cleanup attrs
		for ( int i=0; i<m_tSchema.GetRowSize(); i++ )
			m_tDocInfo.m_pDynamic[i] = 0;

		// split columns into fields and attrs
		for ( int i=0; i<m_iPlainFieldsLength; i++ )
		{
			// get that field (unpacking it on the fly when configured)
			auto tCol = SqlUnpackColumn ( i, m_dUnpack[i] );
			m_dFields[i] = (BYTE*)const_cast<char*> ( tCol.first );
			m_dFieldLengths[i] = tCol.second;
		}

		m_dMvas.Resize ( m_tSchema.GetAttrsCount() );
		for ( auto & i : m_dMvas )
			i.Resize(0);

		// StoreAttribute() returns false on an unparsable document id; skip such rows
		bSkipDoc = false;
		for ( int i=0; i<m_tSchema.GetAttrsCount() && !bSkipDoc; i++ )
			if ( !StoreAttribute(i) )
				bSkipDoc = true;
	}
	while ( bSkipDoc );

	// log it
	DumpDocument();

	return m_dFields;
}
/// Append the current row to the plain-SQL rows dump as an INSERT statement,
/// or delegate to the SphinxQL dump when rt insert statements were requested.
void CSphSource_SQL::DumpDocument ()
{
	if ( m_tParams.m_bPrintRTQueries )
	{
		DumpDocumentSphinxql ();
		return;
	}

	if ( !m_fpDumpRows )
		return;

	fprintf ( m_fpDumpRows, "INSERT INTO rows_%s VALUES (", m_tSchema.GetName ());
	const char * sSep = "";
	for ( int iCol = 0; iCol<m_iSqlFields; ++iCol )
	{
		fprintf ( m_fpDumpRows, "%s", sSep );
		FormatEscaped ( m_fpDumpRows, SqlColumn ( iCol ));
		sSep = ", ";
	}
	fprintf ( m_fpDumpRows, ");\n" );
}
/// Accumulate the current row as one VALUES tuple of a multi-row SphinxQL INSERT,
/// using the column map built by DumpRowsHeaderSphinxql(); the batch is flushed
/// to the dump file (or stdout) once it grows past m_iCutoutDumpSize.
void CSphSource_SQL::DumpDocumentSphinxql ()
{
	// start a fresh INSERT statement when the accumulator is empty
	if (m_sCollectDump.IsEmpty ())
	{
		m_sCollectDump.Sprintf ( "INSERT INTO %s VALUES ", m_tParams.m_sDumpRTIndex.cstr() );
		m_sCollectDump.StartBlock (",",nullptr,";\n");
	}

	m_sCollectDump.StartBlock( dBracketsComma );
	ARRAY_FOREACH ( i, m_dDumpMap )
	{
		// .second says whether the value is textual and needs quoting/escaping
		if ( m_dDumpMap[i].second )
			m_sCollectDump.FixupSpacedAndAppendEscaped ( SqlColumn ( m_dDumpMap[i].first ) );
		else
			m_sCollectDump << SqlColumn ( m_dDumpMap[i].first );
	}
	m_sCollectDump.FinishBlock();

	// flush once the accumulated statement grows large enough
	if ( m_sCollectDump.GetLength ()>m_iCutoutDumpSize )
	{
		m_sCollectDump.FinishBlocks ();
		auto fpDump = m_fpDumpRows;
		if ( !fpDump )
			fpDump = stdout;
		fprintf ( fpDump, "%s", m_sCollectDump.cstr ());
		m_sCollectDump.Clear ();
	}
}
/// Per-field byte lengths of the last fetched document (parallel to the
/// field pointer array returned by NextDocument()).
const int * CSphSource_SQL::GetFieldLengths() const
{
	return m_dFieldLengths;
}
/// Run sql_query_post_index statements and the external post-index hook,
/// substituting $maxid with the biggest document id actually fetched.
/// Called after indexing, on a fresh connection; all failures only warn.
void CSphSource_SQL::PostIndex ()
{
	if ( ( !m_tParams.m_dQueryPostIndex.GetLength() ) && m_tParams.m_sHookPostIndex.IsEmpty() )
		return;

	assert ( !m_bSqlConnected );

	const char * sSqlError = NULL;
	if ( m_tParams.m_dQueryPostIndex.GetLength() )
	{
		// the macro's 'break' bails out of the enclosing loop on error,
		// leaving sSqlError set for the warning below
#define LOC_SQL_ERROR(_msg) { sSqlError = _msg; break; }

		while (true)
		{
			if ( !SqlConnect () )
				LOC_SQL_ERROR ( "mysql_real_connect" );

			ARRAY_FOREACH ( i, m_tParams.m_dQueryPostIndex )
			{
				char * sQuery = sphStrMacro ( m_tParams.m_dQueryPostIndex[i].cstr(), "$maxid", m_tMaxFetchedID );
				bool bRes = SqlQuery ( sQuery );
				delete [] sQuery;

				if ( !bRes )
					LOC_SQL_ERROR ( "sql_query_post_index" );

				SqlDismissResult ();
			}

			break;
		}

		if ( sSqlError )
			sphWarn ( "%s: %s (DSN=%s)", sSqlError, SqlError(), m_sSqlDSN.cstr() );

#undef LOC_SQL_ERROR

		SqlDisconnect ();
	}

	if ( !m_tParams.m_sHookPostIndex.IsEmpty() && !HookPostIndex ( m_tParams.m_sHookPostIndex.cstr(), m_tMaxFetchedID ) )
	{
		sphWarn ( "hook_post_index: runtime error %s when running external hook", strerrorm(errno) );
	}
}
/// Begin iterating values of the multi-valued attribute iAttr by issuing its
/// configured query (plain, ranged, or ranged against the main range query).
/// Returns false for non-MVA attrs, field-sourced MVAs (filled inline instead),
/// or on query failure. The result set must have exactly 2 columns: docid, value.
bool CSphSource_SQL::IterateMultivaluedStart ( int iAttr, CSphString & sError )
{
	if ( iAttr<0 || iAttr>=m_tSchema.GetAttrsCount() )
		return false;

	m_iMultiAttr = iAttr;
	const CSphColumnInfo & tAttr = m_tSchema.GetAttr(iAttr);

	if ( !(tAttr.m_eAttrType==SPH_ATTR_UINT32SET || tAttr.m_eAttrType==SPH_ATTR_INT64SET ) )
		return false;

	if ( !QueryPreAll ( sError ) )
		return false;

	CSphString sPrefix;
	switch ( tAttr.m_eSrc )
	{
	case SPH_ATTRSRC_FIELD:
		// field-sourced MVAs are parsed per-row in StoreAttribute(), not here
		return false;

	case SPH_ATTRSRC_QUERY:
		// run simple query
		if ( !SqlQuery ( tAttr.m_sQuery.cstr() ) )
		{
			sError.SetSprintf ( "multi-valued attr '%s' query failed: %s", tAttr.m_sName.cstr(), SqlError() );
			return false;
		}
		break;

	case SPH_ATTRSRC_RANGEDQUERY:
		m_tParams.m_iRangeStep = m_tParams.m_iRefRangeStep;

		// setup ranges
		sPrefix.SetSprintf ( "multi-valued attr '%s' ranged query: ", tAttr.m_sName.cstr() );
		if ( !SetupRanges ( tAttr.m_sQueryRange.cstr(), tAttr.m_sQuery.cstr(), sPrefix.cstr(), sError, SRE_MVA ) )
			return false;

		// run first step (in order to report errors)
		m_tCurrentID = m_tMinID;
		if ( !RunQueryStep ( tAttr.m_sQuery.cstr(), sError ) )
			return false;
		break;

	case SPH_ATTRSRC_RANGEDMAINQUERY:
		// ranged against the main document range query (sql_query_range)
		if ( m_tParams.m_sQueryRange.IsEmpty() )
		{
			sError.SetSprintf ( "multi-valued attr '%s': empty main range query", tAttr.m_sName.cstr() );
			return false;
		}

		m_tParams.m_iRangeStep = m_tParams.m_iRefRangeStep;

		// setup ranges
		sPrefix.SetSprintf ( "multi-valued attr '%s' ranged query: ", tAttr.m_sName.cstr() );
		if ( !SetupRanges ( m_tParams.m_sQueryRange.cstr(), tAttr.m_sQuery.cstr(), sPrefix.cstr(), sError, SRE_MVA ) )
			return false;

		// run first step (in order to report errors)
		m_tCurrentID = m_tMinID;
		if ( !RunQueryStep ( tAttr.m_sQuery.cstr(), sError ) )
			return false;
		break;

	default:
		sError.SetSprintf ( "INTERNAL ERROR: unknown multi-valued attr source type %d", tAttr.m_eSrc );
		return false;
	}

	// check fields count
	if ( SqlNumFields()!=2 )
	{
		sError.SetSprintf ( "multi-valued attr '%s' query returned %d fields (expected 2)", tAttr.m_sName.cstr(), SqlNumFields() );
		SqlDismissResult ();
		return false;
	}
	return true;
}
/// Fetch the next (docid, value) pair of the MVA started by IterateMultivaluedStart().
/// For ranged sources, transparently advances through the remaining range steps.
/// Returns false when the value stream is exhausted.
bool CSphSource_SQL::IterateMultivaluedNext ( int64_t & iDocID, int64_t & iMvaValue )
{
	const CSphColumnInfo & tAttr = m_tSchema.GetAttr ( m_iMultiAttr );

	assert ( m_bSqlConnected );
	assert ( tAttr.m_eAttrType==SPH_ATTR_UINT32SET || tAttr.m_eAttrType==SPH_ATTR_INT64SET );

	// fetch next row
	bool bGotRow = SqlFetchRow ();
	while ( !bGotRow )
	{
		if ( SqlIsError() )
			sphDie ( "sql_fetch_row: %s", SqlError() ); // FIXME! this should be reported

		// non-ranged sources have a single result set; once drained, we are done
		if ( tAttr.m_eSrc!=SPH_ATTRSRC_RANGEDQUERY && tAttr.m_eSrc!=SPH_ATTRSRC_RANGEDMAINQUERY )
		{
			SqlDismissResult();
			return false;
		}

		// ranged sources: issue the next range step and keep fetching
		CSphString sTmp;
		if ( !RunQueryStep ( tAttr.m_sQuery.cstr(), sTmp ) ) // FIXME! this should be reported
			return false;

		bGotRow = SqlFetchRow ();
		continue;
	}

	// return that tuple or offset to storage for MVA64 value
	iDocID = sphToInt64 ( SqlColumn(0) );
	iMvaValue = sphToInt64 ( SqlColumn(1) );

	return true;
}
/// Issue the configured kill-list query, if any.
/// Returns false when no query is configured or when it fails (sError set).
bool CSphSource_SQL::IterateKillListStart ( CSphString & sError )
{
	if ( m_tParams.m_sQueryKilllist.IsEmpty () )
		return false;

	if ( SqlQuery ( m_tParams.m_sQueryKilllist.cstr () ) )
		return true;

	sError.SetSprintf ( "killlist query failed: %s", SqlError() );
	return false;
}
/// Fetch the next kill-list document id into tDocID.
/// Returns false when the result set is exhausted; dies hard on a fetch error.
bool CSphSource_SQL::IterateKillListNext ( DocID_t & tDocID )
{
	if ( SqlFetchRow () )
		tDocID = sphToInt64 ( SqlColumn(0) );
	else
	{
		if ( SqlIsError() )
			sphDie ( "sql_query_killlist: %s", SqlError() ); // FIXME! this should be reported
		else
		{
			SqlDismissResult ();
			return false;
		}
	}

	return true;
}
/// Warn about a column-unpack failure; only the first failure per source is
/// reported, later ones are silently suppressed.
void CSphSource_SQL::ReportUnpackError ( int iIndex, int iError )
{
	if ( m_bUnpackFailed )
		return;

	m_bUnpackFailed = true;
	sphWarn ( "failed to unpack column '%s', error=%d, rowid=%u", SqlFieldName(iIndex), iError, m_tDocInfo.m_tRowID );
}
/// Fetch field iFieldIndex of the current row as a (pointer, length) view;
/// empty values are normalized to a null pointer.
Str_t CSphSource_SQL::SqlColumnStream ( int iFieldIndex )
{
	int iCol = m_tSchema.GetField ( iFieldIndex ).m_iIndex;
	Str_t tStream { SqlColumn ( iCol ), SqlColumnLength ( iCol ) };
	if ( !IsEmpty ( tStream ) )
		return tStream;

	tStream.first = nullptr;
	return tStream;
}
/// Base implementation: compressed columns are read like plain ones;
/// drivers with native compressed streams override this.
Str_t CSphSource_SQL::SqlCompressedColumnStream ( int iFieldIndex )
{
	return SqlColumnStream ( iFieldIndex );
}
/// Base implementation: nothing to release for plain column streams;
/// drivers that allocate in SqlCompressedColumnStream() override this.
void CSphSource_SQL::SqlCompressedColumnReleaseStream ( Str_t /*tStream*/ )
{
}
#if WITH_ZLIB
namespace {

/// Inflate a raw zlib stream into tBuffer, growing it geometrically as needed.
/// On success returns (buffer ptr, decompressed length) and NUL-terminates the data;
/// on failure returns (nullptr, zlib error code) for the caller to report.
Str_t UnpackZlib ( CSphVector<char>& tBuffer, Str_t tInputStream )
{
	Str_t tResult { nullptr, 0 };
	uLong uBufferOffset = 0;

	z_stream tStream;
	tStream.zalloc = Z_NULL;
	tStream.zfree = Z_NULL;
	tStream.opaque = Z_NULL;
	tStream.avail_in = tInputStream.second;
	tStream.next_in = (Bytef*)tInputStream.first;

	tResult.second = inflateInit ( &tStream );
	if ( tResult.second != Z_OK )
		return tResult;

	while ( true )
	{
		// leave one byte of headroom for the trailing NUL
		tStream.next_out = (Bytef*)&tBuffer[static_cast<int64_t> ( uBufferOffset )];
		tStream.avail_out = tBuffer.GetLength() - uBufferOffset - 1;

		tResult.second = inflate ( &tStream, Z_NO_FLUSH );
		if ( tResult.second == Z_OK )
		{
			// Z_OK with the output space exhausted: double the buffer and resume
			assert ( tStream.avail_out == 0 );

			tBuffer.Resize ( tBuffer.GetLength() * 2 );
			uBufferOffset = tStream.total_out;
			continue;
		}

		if ( tResult.second == Z_STREAM_END )
		{
			tBuffer[static_cast<int64_t> ( tStream.total_out )] = '\0';
			tResult.first = &tBuffer[0];
			tResult.second = static_cast<int>(tStream.total_out);
		}
		// any other return code falls through with first==nullptr and the error code in second
		break;
	}

	inflateEnd ( &tStream );
	return tResult;
}

}
#endif
/// Return field iFieldIndex of the current row, decompressing it according to
/// eFormat (none, raw zlib, or MySQL COMPRESS() layout). On unpack failure a
/// one-time warning is emitted and an empty (nullptr, 0) stream is returned.
Str_t CSphSource_SQL::SqlUnpackColumn ( int iFieldIndex, ESphUnpackFormat eFormat )
{
	int iIndex = m_tSchema.GetField ( iFieldIndex ).m_iIndex;
	Str_t tResult { nullptr, 0 };
	switch ( eFormat )
	{
#if WITH_ZLIB
	case SPH_UNPACK_ZLIB:
	{
		auto tSqlCompressedStream = SqlCompressedColumnStream ( iFieldIndex );
		// release the driver-owned stream on every exit path
		auto _ = AtScopeExit ( [tSqlCompressedStream, this] { SqlCompressedColumnReleaseStream ( tSqlCompressedStream ); } );
		tResult = UnpackZlib ( m_dUnpackBuffers[iFieldIndex], tSqlCompressedStream );
	}
	break;

	case SPH_UNPACK_MYSQL_COMPRESS:
	{
		auto tSqlStream = SqlColumnStream ( iFieldIndex );
		// MySQL COMPRESS() output is a 4-byte length header followed by zlib data
		if ( tSqlStream.second <= 4 )
		{
			if ( !m_bUnpackFailed )
			{
				m_bUnpackFailed = true;
				sphWarn ( "failed to unpack '%s', invalid column size (size=%d), rowid=%u", SqlFieldName ( iIndex ), tSqlStream.second, m_tDocInfo.m_tRowID );
			}
			break;
		}

		// little-endian uncompressed length from the header
		uLong uSize = 0;
		for ( int i = 0; i < 4; ++i )
			uSize += ( static_cast<uLong> ( (BYTE)tSqlStream.first[i] ) ) << ( 8 * i );
		// NOTE(review): top two bits are masked off here, presumably header flags — confirm against MySQL format
		uSize &= 0x3FFFFFFF;

		if ( uSize > m_tParams.m_uUnpackMemoryLimit )
		{
			if ( !m_bUnpackOverflow )
			{
				m_bUnpackOverflow = true;
				sphWarn ( "failed to unpack '%s', column size limit exceeded (size=%d), rowid=%u", SqlFieldName ( iIndex ), (int)uSize, m_tDocInfo.m_tRowID );
			}
			break;
		}

		CSphVector<char> & tBuffer = m_dUnpackBuffers[iFieldIndex];
		tBuffer.Resize ( static_cast<int64_t> ( uSize ) + 1 ); // +1 for the trailing NUL
		unsigned long uLen = tSqlStream.second - 4;
		int iResult = uncompress ( (Bytef*)tBuffer.Begin(), &uSize, (Bytef*)tSqlStream.first + 4, uLen );
		if ( iResult == Z_OK )
		{
			tBuffer[static_cast<int64_t> ( uSize )] = 0;
			tResult.first = &tBuffer[0];
			tResult.second = static_cast<int> ( uSize );
		} else
			tResult.second = iResult; // zlib error code, reported below
	}
	break;
#endif

	case SPH_UNPACK_NONE:
	default:
		tResult = SqlColumnStream ( iFieldIndex );
		return tResult;
	}

	// a null pointer at this point means some unpack path failed; warn once
	if ( !tResult.first )
	{
		ReportUnpackError ( iIndex, tResult.second );
		tResult.second = 0;
	}

	return tResult;
}
/// Ordering/equality functor for (docid, rowid) pairs: strict weak ordering by
/// docid then rowid, while equality considers the docid alone (so sphUniq()
/// keeps a single row per document id).
struct CmpPairs_fn
{
	bool IsLess ( const IDPair_t & tA, const IDPair_t & tB ) const
	{
		if ( tA.m_tDocID!=tB.m_tDocID )
			return tA.m_tDocID < tB.m_tDocID;

		return tA.m_tRowID < tB.m_tRowID;
	}

	bool IsEq ( const IDPair_t & tA, const IDPair_t & tB ) const
	{
		return tA.m_tDocID==tB.m_tDocID;
	}
};
/// Build a hash key from (docid, per-docid entry ordinal) by chaining FNV64,
/// so multiple joined-field rows of the same document get distinct keys.
static uint64_t CreateKey ( DocID_t tDocID, int iEntry )
{
	uint64_t uRes = sphFNV64 ( &tDocID, sizeof(tDocID) );
	return sphFNV64 ( &iEntry, sizeof(iEntry), uRes );
}
/// Fetch all joined-field rows into a spill file, recording per-(docid,entry)
/// file offsets into dJoinedOffsets. Iterates every joined field (plain or
/// ranged query), writing docid + field id + optional payload position +
/// length-prefixed text for each row. Returns false with sError set on failure.
bool CSphSource_SQL::FetchJoinedFields ( CSphAutofile & tFile, CSphVector<std::unique_ptr<OpenHashTable_T<uint64_t, uint64_t>>> & dJoinedOffsets, CSphString & sError )
{
	// nothing (left) to do?
	if ( m_iJoinedHitField>=m_tSchema.GetFieldsCount() )
	{
		m_iJoinedHitField = -1;
		return true;
	}

	if ( !QueryPreAll ( sError ) )
		return false;

	dJoinedOffsets.Resize(m_tSchema.GetFieldsCount());

	CSphWriter tWriter;
	tWriter.SetFile ( tFile, nullptr, sError );

	bool bProcessingRanged = true;

	while ( m_iJoinedHitField<m_tSchema.GetFieldsCount() )
	{
		if ( SqlFetchRow() )
		{
			// got a row: spill it to the file and remember its offset
			if ( !dJoinedOffsets[m_iJoinedHitField] )
				dJoinedOffsets[m_iJoinedHitField] = std::make_unique<OpenHashTable_T<uint64_t, uint64_t>>();

			auto & hOffsets = *dJoinedOffsets[m_iJoinedHitField];

			DocID_t tDocId = sphToInt64 ( SqlColumn(0) ); // FIXME! handle conversion errors and zero/max values?

			// a document may have several rows per field; find the first free entry ordinal
			int iEntry=0;
			while ( hOffsets.Find ( CreateKey ( tDocId, iEntry ) ) )
				iEntry++;

			// add only if there's no existing entry
			hOffsets.Add ( CreateKey ( tDocId, iEntry ), tWriter.GetPos() );

			tWriter.ZipOffset(tDocId);
			tWriter.ZipInt(m_iJoinedHitField);
			// payload fields carry an explicit token position in column 2
			if ( m_tSchema.GetField(m_iJoinedHitField).m_bPayload )
				tWriter.ZipInt ( sphToDword ( SqlColumn(2) ) );

			BYTE * pText = (BYTE *)const_cast<char*>( SqlColumn(1) );
			DWORD uLength = SqlColumnLength(1);
			tWriter.ZipInt(uLength);
			tWriter.PutBytes ( pText, uLength );
		}
		else if ( SqlIsError() )
		{
			// error while fetching row
			sError = SqlError();
			return false;
		} else
		{
			int iLastField = m_iJoinedHitField;
			bool bRanged = ( m_iJoinedHitField>=m_iPlainFieldsLength && m_iJoinedHitField<m_tSchema.GetFieldsCount()
				&& ( m_tSchema.GetField(m_iJoinedHitField).m_eSrc==SPH_ATTRSRC_RANGEDQUERY || m_tSchema.GetField(m_iJoinedHitField).m_eSrc==SPH_ATTRSRC_RANGEDMAINQUERY ) );

			// current field is over, continue to next field
			if ( m_iJoinedHitField<0 )
				m_iJoinedHitField = m_iPlainFieldsLength;
			else if ( !bRanged || !bProcessingRanged )
				m_iJoinedHitField++;

			SqlDismissResult();

			// eof check
			if ( m_iJoinedHitField>=m_tSchema.GetFieldsCount() )
			{
				m_iJoinedHitField = -1;
				return true;
			}

			bProcessingRanged = false;
			bool bCheckNumFields = true;

			const CSphColumnInfo & tJoined = m_tSchema.GetField(m_iJoinedHitField);
			bool bJoinedRanged = ( tJoined.m_eSrc==SPH_ATTRSRC_RANGEDQUERY || tJoined.m_eSrc==SPH_ATTRSRC_RANGEDMAINQUERY );

			// start fetching next field
			if ( !bJoinedRanged )
			{
				if ( !SqlQuery ( tJoined.m_sQuery.cstr() ) )
				{
					sError = SqlError();
					return false;
				}
			}
			else
			{
				m_tParams.m_iRangeStep = m_tParams.m_iRefRangeStep;

				// setup ranges for next field, but only when we actually moved to a new one
				if ( iLastField!=m_iJoinedHitField )
				{
					const CSphString & sRange = ( tJoined.m_eSrc==SPH_ATTRSRC_RANGEDQUERY ? tJoined.m_sQueryRange : m_tParams.m_sQueryRange );

					CSphString sPrefix;
					sPrefix.SetSprintf ( "joined field '%s' ranged query: ", tJoined.m_sName.cstr() );
					if ( !SetupRanges ( sRange.cstr(), tJoined.m_sQuery.cstr(), sPrefix.cstr(), sError, SRE_JOINEDHITS ) )
						return false;

					m_tCurrentID = m_tMinID;
				}

				// run first step (in order to report errors)
				bool bRes = RunQueryStep ( tJoined.m_sQuery.cstr(), sError );
				bProcessingRanged = bRes; // select next documents in range or loop once to process next field
				bCheckNumFields = bRes;

				if ( !sError.IsEmpty() )
					return false;
			}

			const int iExpected = m_tSchema.GetField(m_iJoinedHitField).m_bPayload ? 3 : 2;
			if ( bCheckNumFields && SqlNumFields()!=iExpected )
			{
				const char * szName = m_tSchema.GetField(m_iJoinedHitField).m_sName.cstr();
				sError.SetSprintf ( "joined field '%s': query MUST return exactly %d columns, got %d", szName, iExpected, SqlNumFields() );
				return false;
			}
		}
	}

	m_iJoinedHitField = -1;
	return true;
}
/// Replay the joined-field spill file (written by FetchJoinedFields) and build
/// hits for it. Lazily sorts and dedupes m_dAllIds on first call, skips entries
/// whose docid never appeared in the main query, and resumes mid-field when a
/// previous call stopped with hits still pending. Returns the filled hits batch.
ISphHits * CSphSource_SQL::IterateJoinedHits ( CSphReader & tReader, CSphString & sError )
{
	// iterating of joined hits happens after iterating hits from main query
	// so we may be sure at this moment no new IDs will be put in m_dAllIds
	if ( !m_bIdsSorted )
	{
		// sorted by docids, but we may have duplicates
		m_dAllIds.Sort ( CmpPairs_fn() );
		IDPair_t * pStart = m_dAllIds.Begin();
		int iLeft = sphUniq ( pStart, m_dAllIds.GetLength(), CmpPairs_fn() );
		m_dAllIds.Resize(iLeft);
		m_bIdsSorted = true;

		m_iJoinedFileSize = tReader.GetFilesize();
	}

	m_tHits.Resize(0);

	while ( m_tState.m_bProcessingHits || tReader.GetPos()<m_iJoinedFileSize )
	{
		// not resuming a half-done field? read the next spill entry
		if ( !m_tState.m_bProcessingHits )
		{
			DocID_t tDocId = tReader.UnzipOffset();
			int iField = tReader.UnzipInt();
			int iStartPos = 0;
			if ( m_tSchema.GetField(iField).m_bPayload )
				iStartPos = tReader.UnzipInt();

			DWORD uLength = tReader.UnzipInt();
			m_dJoinedField.Resize(uLength+1);
			tReader.GetBytes ( m_dJoinedField.Begin(), uLength );
			m_dJoinedField[uLength] = '\0';

			// lets skip joined document totally if there was no such document ID returned by main query
			const IDPair_t * pIdPair = m_dAllIds.BinarySearch ( bind ( &IDPair_t::m_tDocID ), tDocId );
			if ( !pIdPair )
				continue;

			// next field/document? reset position
			if ( tDocId!=m_iJoinedHitID || iField!=m_iJoinedHitField )
			{
				m_iJoinedHitField = iField;
				m_iJoinedHitID = tDocId;
				m_iJoinedHitPos = 0;
			}

			// build those hits
			m_tState = CSphBuildHitsState_t();
			m_tState.m_iField = m_iJoinedHitField;
			m_tState.m_iStartField = m_iJoinedHitField;
			m_tState.m_iEndField = m_iJoinedHitField+1;

			// payload fields start at the explicit position from the spill file;
			// plain joined fields continue from where the previous chunk ended
			if ( m_tSchema.GetField(m_iJoinedHitField).m_bPayload )
				m_tState.m_iStartPos = iStartPos;
			else
				m_tState.m_iStartPos = m_iJoinedHitPos;

			m_pJoinedFields = m_dJoinedField.Begin();
			m_tState.m_dFields = &m_pJoinedFields;
			m_tState.m_dFieldLengths.Resize(1);
			m_tState.m_dFieldLengths[0] = uLength;

			m_tDocInfo.m_tRowID = pIdPair->m_tRowID;
		}

		BuildHits ( sError, true );

		// update current position
		if ( !m_tSchema.GetField(m_iJoinedHitField).m_bPayload && !m_tState.m_bProcessingHits && m_tHits.GetLength() )
			m_iJoinedHitPos = HITMAN::GetPos ( m_tHits.Last().m_uWordPos );

		// the hits batch filled up mid-field; resume here on the next call
		if ( m_tState.m_bProcessingHits )
			break;
	}

	// eof check
	if ( !m_tState.m_bProcessingHits && tReader.GetPos()>=m_iJoinedFileSize )
		m_tDocInfo.m_tRowID = ( m_tHits.GetLength() ? 0 : INVALID_ROWID );

	return &m_tHits;
}
| 41,540
|
C++
|
.cpp
| 1,298
| 28.918336
| 167
| 0.665849
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,974
|
source_document.cpp
|
manticoresoftware_manticoresearch/src/indexing_sources/source_document.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "source_document.h"
#include "stripper/html_stripper.h"
#include "tokenizer/tokenizer.h"
#include "sphinxint.h"
/// install the dictionary used for wordid generation; must be non-null
void CSphSource::SetDict ( const DictRefPtr_c& pDict )
{
assert ( pDict );
m_pDict = pDict;
}
/// read-only access to accumulated document/byte counters
const CSphSourceStats & CSphSource::GetStats ()
{
return m_tStats;
}
/// configure HTML stripping; lazily creates the stripper on first call
/// returns false (with sError set by the stripper) on any invalid setting
bool CSphSource::SetStripHTML ( const char * sExtractAttrs, const char * sRemoveElements, bool bDetectParagraphs, const char * sZones, CSphString & sError )
{
	if ( !m_pStripper )
		m_pStripper = new CSphHTMLStripper ( true );

	// configure in the same order as before; stop at the first failure
	bool bOk = m_pStripper->SetIndexedAttrs ( sExtractAttrs, sError )
		&& m_pStripper->SetRemovedElements ( sRemoveElements, sError );
	if ( !bOk )
		return false;

	if ( bDetectParagraphs )
		m_pStripper->EnableParagraphs();

	return m_pStripper->SetZones ( sZones, sError );
}
/// install the (optional) field filter applied to field text before tokenization
void CSphSource::SetFieldFilter ( std::unique_ptr<ISphFieldFilter> pFilter )
{
m_pFieldFilter = std::move ( pFilter );
}
/// install the tokenizer; must be non-null
void CSphSource::SetTokenizer ( TokenizerRefPtr_c pTokenizer )
{
assert ( pTokenizer );
m_pTokenizer = std::move ( pTokenizer );
}
/// merge this source's schema into pInfo
/// an empty target schema is simply filled from ours; a non-empty one must match
bool CSphSource::UpdateSchema ( CSphSchema * pInfo, CSphString & sError )
{
	assert ( pInfo );

	bool bTargetEmpty = ( pInfo->GetFieldsCount()==0 && pInfo->GetAttrsCount()==0 );
	if ( !bTargetEmpty )
		return m_tSchema.CompareTo ( *pInfo, sError ); // check it

	// fill it
	*pInfo = m_tSchema;
	return true;
}
/// copy indexing settings into the source, clamping numeric options to sane ranges
void CSphSource::Setup ( const CSphSourceSettings & tSettings, StrVec_t * pWarnings )
{
// negative prefix/infix/substring lengths are treated as disabled (0)
SetMinPrefixLen ( Max ( tSettings.RawMinPrefixLen(), 0 ) );
m_iMinInfixLen = Max ( tSettings.m_iMinInfixLen, 0 );
m_iMaxSubstringLen = Max ( tSettings.m_iMaxSubstringLen, 0 );
m_iBoundaryStep = Max ( tSettings.m_iBoundaryStep, -1 );
m_bIndexExactWords = tSettings.m_bIndexExactWords;
// overshort/stopword steps are clamped to 0..1
m_iOvershortStep = Min ( Max ( tSettings.m_iOvershortStep, 0 ), 1 );
m_iStopwordStep = Min ( Max ( tSettings.m_iStopwordStep, 0 ), 1 );
m_bIndexSP = tSettings.m_bIndexSP;
m_dPrefixFields = tSettings.m_dPrefixFields;
m_dInfixFields = tSettings.m_dInfixFields;
m_dStoredFields = tSettings.m_dStoredFields;
m_dStoredOnlyFields = tSettings.m_dStoredOnlyFields;
m_dColumnarAttrs = tSettings.m_dColumnarAttrs;
m_dColumnarNonStoredAttrs = tSettings.m_dColumnarNonStoredAttrs;
m_dRowwiseAttrs = tSettings.m_dRowwiseAttrs;
m_dColumnarStringsNoHash = tSettings.m_dColumnarStringsNoHash;
m_dKNN = tSettings.m_dKNN;
m_dJsonSIAttrs = tSettings.m_dJsonSIAttrs;
m_bIndexFieldLens = tSettings.m_bIndexFieldLens;
m_eEngine = tSettings.m_eEngine;
// propagate per-field flags into the schema; warnings (if any) go to pWarnings
m_tSchema.SetupFlags ( *this, false, pWarnings );
}
/// parse the morphology_skip_fields-style settings into the per-field morphology bitmap
bool CSphSource::SetupMorphFields ( CSphString & sError )
{
return ParseMorphFields ( m_pDict->GetSettings().m_sMorphology, m_pDict->GetSettings().m_sMorphFields, m_tSchema.GetFields(), m_tMorphFields, sError );
}
/// base-class stub for sources without joined fields: signals immediate eof
ISphHits * CSphSource::IterateJoinedHits ( CSphReader & tReader, CSphString & )
{
static ISphHits dDummy;
m_tDocInfo.m_tRowID = INVALID_ROWID; // pretend that's an eof
return &dDummy;
}
/////////////////////////////////////////////////////////////////////////////
// DOCUMENT SOURCE
/////////////////////////////////////////////////////////////////////////////
/// start in a clean state
CSphSource::CSphBuildHitsState_t::CSphBuildHitsState_t ()
{
Reset();
}
/// Reset() also frees any leftover temporary field storage
CSphSource::CSphBuildHitsState_t::~CSphBuildHitsState_t ()
{
Reset();
}
void CSphSource::CSphBuildHitsState_t::Reset ()
{
m_bProcessingHits = false;
m_bDocumentDone = false;
m_dFields = nullptr;
m_dFieldLengths.Resize(0);
m_iStartPos = 0;
m_iHitPos = 0;
m_iField = 0;
m_iStartField = 0;
m_iEndField = 0;
m_iBuildLastStep = 1;
ARRAY_FOREACH ( i, m_dTmpFieldStorage )
SafeDeleteArray ( m_dTmpFieldStorage[i] );
m_dTmpFieldStorage.Resize ( 0 );
m_dTmpFieldPtrs.Resize ( 0 );
m_dFiltered.Resize ( 0 );
}
/// construct a source with the default hit batch limit and a named schema
CSphSource::CSphSource ( const char * sName )
: m_iMaxHits ( MAX_SOURCE_HITS )
, m_tSchema ( sName )
{
}
/// release the file-field read buffer and the HTML stripper (if any)
CSphSource::~CSphSource()
{
SafeDeleteArray ( m_pReadFileBuffer );
SafeDelete ( m_pStripper );
}
/// fetch the next document from the underlying source and prepare its fields for hit building
/// sets bEOF when the source is exhausted; returns false (with sError) on a hard error
/// on success, field text and lengths are staged in m_tState, stored-field copies in m_dDocFields
bool CSphSource::IterateDocument ( bool & bEOF, CSphString & sError )
{
assert ( m_pTokenizer );
assert ( !m_tState.m_bProcessingHits );
m_tHits.Resize ( 0 );
m_tState.Reset();
m_tState.m_iEndField = m_iPlainFieldsLength;
m_tState.m_dFieldLengths.Resize ( m_tState.m_iEndField );
// pre-size the per-field scratch arrays used when a field filter rewrites text
if ( m_pFieldFilter )
{
m_tState.m_dTmpFieldPtrs.Resize ( m_tState.m_iEndField );
m_tState.m_dTmpFieldStorage.Resize ( m_tState.m_iEndField );
ARRAY_FOREACH ( i, m_tState.m_dTmpFieldPtrs )
{
m_tState.m_dTmpFieldPtrs[i] = NULL;
m_tState.m_dTmpFieldStorage[i] = NULL;
}
}
m_dDocFields.Resize ( m_tSchema.GetFieldsCount() );
for ( auto & i : m_dDocFields )
i.Resize(0);
// clean up field length counters
if ( m_pFieldLengthAttrs )
memset ( m_pFieldLengthAttrs, 0, sizeof ( DWORD ) * m_tSchema.GetFieldsCount() );
// fetch next document
while (true)
{
m_tState.m_dFields = NextDocument ( bEOF, sError );
if ( bEOF )
return ( sError.IsEmpty() );
if ( !m_tState.m_dFields && !sError.IsEmpty() )
return false;
const int * pFieldLengths = GetFieldLengths ();
// stash lengths, and keep a private copy of stored fields (docstore needs the raw text)
for ( int iField=0; iField<m_tState.m_iEndField; iField++ )
{
m_tState.m_dFieldLengths[iField] = pFieldLengths[iField];
if ( m_tSchema.GetField(iField).m_uFieldFlags & CSphColumnInfo::FIELD_STORED )
{
int iFieldLen = m_tState.m_dFieldLengths[iField];
m_dDocFields[iField].Resize(iFieldLen);
memcpy ( m_dDocFields[iField].Begin(), m_tState.m_dFields[iField], iFieldLen );
}
}
// tricky bit
// we can only skip document indexing from here, IterateHits() is too late
// so in case the user chose to skip documents with file field problems
// we need to check for those here
if ( m_eOnFileFieldError==FFE_SKIP_DOCUMENT || m_eOnFileFieldError==FFE_FAIL_INDEX )
{
bool bOk = true;
for ( int iField=0; iField<m_tState.m_iEndField && bOk; iField++ )
{
const BYTE * sFilename = m_tState.m_dFields[iField];
if ( m_tSchema.GetField(iField).m_bFilename )
bOk &= CheckFileField ( sFilename );
if ( !bOk && m_eOnFileFieldError==FFE_FAIL_INDEX )
{
sError.SetSprintf ( "error reading file field data (docid=" INT64_FMT ", filename=%s)", m_dAttrs[0], sFilename );
return false;
}
}
if ( !bOk && m_eOnFileFieldError==FFE_SKIP_DOCUMENT )
continue;
}
// run the field filter over non-file fields, pointing state at filtered copies where text changed
if ( m_pFieldFilter )
{
bool bHaveModifiedFields = false;
for ( int iField=0; iField<m_tState.m_iEndField; iField++ )
{
if ( m_tSchema.GetField(iField).m_bFilename )
{
m_tState.m_dTmpFieldPtrs[iField] = m_tState.m_dFields[iField];
continue;
}
CSphVector<BYTE> dFiltered;
int iFilteredLen = m_pFieldFilter->Apply ( m_tState.m_dFields[iField], m_tState.m_dFieldLengths[iField], dFiltered, false );
if ( iFilteredLen )
{
// filter produced new text: take ownership of the buffer (freed by state Reset)
m_tState.m_dTmpFieldStorage[iField] = dFiltered.LeakData();
m_tState.m_dTmpFieldPtrs[iField] = m_tState.m_dTmpFieldStorage[iField];
m_tState.m_dFieldLengths[iField] = iFilteredLen;
bHaveModifiedFields = true;
} else
m_tState.m_dTmpFieldPtrs[iField] = m_tState.m_dFields[iField];
}
if ( bHaveModifiedFields )
m_tState.m_dFields = (BYTE **)&( m_tState.m_dTmpFieldPtrs[0] );
}
// we're good
break;
}
++m_tStats.m_iTotalDocuments;
return true;
}
// hack notification for joined fields
// hack notification for joined fields
// remember the docid->rowid mapping so joined hits can be resolved later
void CSphSource::RowIDAssigned ( DocID_t tDocID, RowID_t tRowID )
{
	if ( !HasJoinedFields() )
		return;

	IDPair_t & tMapping = m_dAllIds.Add();
	tMapping.m_tDocID = tDocID;
	tMapping.m_tRowID = tRowID;
}
/// produce the next batch of hits for the current document
/// returns NULL once the document is fully processed
ISphHits * CSphSource::IterateHits ( CSphString & sError )
{
	if ( m_tState.m_bDocumentDone )
		return NULL;

	// refill the accumulator from where BuildHits left off
	m_tHits.Resize ( 0 );
	BuildHits ( sError, false );
	return &m_tHits;
}
bool CSphSource::CheckFileField ( const BYTE * sField )
{
CSphAutofile tFileSource;
CSphString sError;
if ( tFileSource.Open ( (const char *)sField, SPH_O_READ, sError )==-1 )
{
sphWarning ( "%s", sError.cstr() );
return false;
}
int64_t iFileSize = tFileSource.GetSize();
if ( iFileSize+16 > m_iMaxFileBufferSize )
{
sphWarning ( "file '%s' too big for a field (size=" INT64_FMT ", max_file_field_buffer=%d)", (const char *)sField, iFileSize, m_iMaxFileBufferSize );
return false;
}
return true;
}
/// returns file size on success, and replaces *ppField with a pointer to data
/// returns -1 on failure (and emits a warning)
/// returns file size on success, and replaces *ppField with a pointer to data
/// returns -1 on failure (and emits a warning)
int CSphSource::LoadFileField ( BYTE ** ppField, CSphString & sError )
{
CSphAutofile tFileSource;
BYTE * sField = *ppField;
if ( tFileSource.Open ( (const char *)sField, SPH_O_READ, sError )==-1 )
{
sphWarning ( "%s", sError.cstr() );
return -1;
}
int64_t iFileSize = tFileSource.GetSize();
if ( iFileSize+16 > m_iMaxFileBufferSize )
{
sphWarning ( "file '%s' too big for a field (size=" INT64_FMT ", max_file_field_buffer=%d)", (const char *)sField, iFileSize, m_iMaxFileBufferSize );
return -1;
}
int iFieldBytes = (int)iFileSize;
if ( !iFieldBytes )
return 0;
// grow the shared read buffer if needed; sizing is a power of two derived from the field size
// NOTE(review): correctness of the trailing NUL write below relies on
// 1<<sphLog2(iFieldBytes+15) being > iFieldBytes -- TODO confirm sphLog2 rounds up
int iBufSize = Max ( m_iReadFileBufferSize, 1 << sphLog2 ( iFieldBytes+15 ) );
if ( m_iReadFileBufferSize < iBufSize )
SafeDeleteArray ( m_pReadFileBuffer );
if ( !m_pReadFileBuffer )
{
m_pReadFileBuffer = new char [ iBufSize ];
m_iReadFileBufferSize = iBufSize;
}
if ( !tFileSource.Read ( m_pReadFileBuffer, iFieldBytes, sError ) )
{
sphWarning ( "read failed: %s", sError.cstr() );
return -1;
}
// NUL-terminate and hand the caller a pointer into the shared buffer
// (valid only until the next LoadFileField call)
m_pReadFileBuffer[iFieldBytes] = '\0';
*ppField = (BYTE*)m_pReadFileBuffer;
return iFieldBytes;
}
/// append one "<field>_len" TOKENCOUNT attribute per full-text field (index_field_lengths=1)
/// returns false (with sError) if a non-TOKENCOUNT attribute already claims such a name;
/// returns true early if the length attributes were already added
bool AddFieldLens ( CSphSchema & tSchema, bool bDynamic, CSphString & sError )
{
	const int iFields = tSchema.GetFieldsCount();
	for ( int iField = 0; iField < iFields; iField++ )
	{
		CSphColumnInfo tLenCol;
		tLenCol.m_sName.SetSprintf ( "%s_len", tSchema.GetFieldName(iField) );

		int iExisting = tSchema.GetAttrIndex ( tLenCol.m_sName.cstr() );
		if ( iExisting>=0 )
		{
			if ( tSchema.GetAttr(iExisting).m_eAttrType!=SPH_ATTR_TOKENCOUNT )
			{
				sError.SetSprintf ( "attribute %s conflicts with index_field_lengths=1; remove it", tLenCol.m_sName.cstr() );
				return false;
			}
			// looks like we already added these
			assert ( tSchema.GetAttr(iExisting).m_sName==tLenCol.m_sName );
			return true;
		}

		tLenCol.m_eAttrType = SPH_ATTR_TOKENCOUNT;
		tSchema.AddAttr ( tLenCol, bDynamic ); // everything's dynamic at indexing time
	}
	return true;
}
/// inject the auto-defined attributes (docid, blob locator, field lengths) into the schema
/// validates a user-declared docid (must be first and BIGINT); pDefaults, when given,
/// receives placeholder defaults aligned with any inserted attributes
bool CSphSource::AddAutoAttrs ( CSphString & sError, StrVec_t * pDefaults )
{
int iSchemaId = m_tSchema.GetAttrIndex ( sphGetDocidName() );
// id is the first attr
if ( iSchemaId!=-1 )
{
const CSphColumnInfo & tCol = m_tSchema.GetAttr ( iSchemaId );
if ( iSchemaId!=0 )
{
sError.SetSprintf ( "can not define auto-defined '%s' attribute", tCol.m_sName.cstr() );
return false;
}
if ( tCol.m_eAttrType!=SPH_ATTR_BIGINT )
{
sError.SetSprintf ( "can not define auto-defined '%s' attribute with the wrong type '%s', should be '%s'", tCol.m_sName.cstr(), AttrType2Str ( tCol.m_eAttrType ), AttrType2Str ( SPH_ATTR_BIGINT ) );
return false;
}
} else
{
// no user-declared docid: synthesize one, honoring a columnar request for it
CSphColumnInfo tCol ( sphGetDocidName() );
tCol.m_eAttrType = SPH_ATTR_BIGINT;
for ( const auto & i : m_dColumnarAttrs )
if ( i==tCol.m_sName )
tCol.m_uAttrFlags |= CSphColumnInfo::ATTR_COLUMNAR;
m_tSchema.InsertAttr ( 0, tCol, true );
if ( pDefaults )
pDefaults->Insert ( 0, "" );
}
if ( m_tSchema.HasBlobAttrs() && !m_tSchema.GetAttr ( sphGetBlobLocatorName() ) )
{
CSphColumnInfo tCol ( sphGetBlobLocatorName() );
tCol.m_eAttrType = SPH_ATTR_BIGINT;
// should be right after docid
m_tSchema.InsertAttr ( 1, tCol, true );
if ( pDefaults )
pDefaults->Insert ( 1, "" );
}
// rebuild locators in the schema
// (adding and removing a throwaway attribute forces locator recomputation)
const char * szTmpColName = "$_tmp";
CSphColumnInfo tCol ( szTmpColName, SPH_ATTR_BIGINT );
m_tSchema.AddAttr ( tCol, true );
m_tSchema.RemoveAttr ( szTmpColName, true );
// auto-computed length attributes
if ( m_bIndexFieldLens && !AddFieldLens ( m_tSchema, true, sError ) )
return false;
return true;
}
/// (re)allocate per-document attribute storage to match the current schema,
/// and locate the field-length counter block inside the row when enabled
void CSphSource::AllocDocinfo()
{
// tricky bit
// with in-config schema, attr storage gets allocated in Setup() when source is initially created
// so when this AddAutoAttrs() additionally changes the count, we have to change the number of attributes
// but Reset() prohibits that, because that is usually a programming mistake, hence the Swap() dance
CSphMatch tNew;
tNew.Reset ( m_tSchema.GetRowSize() );
Swap ( m_tDocInfo, tNew );
m_dStrAttrs.Resize ( m_tSchema.GetAttrsCount() );
m_dAttrs.Resize ( m_tSchema.GetAttrsCount() );
if ( m_bIndexFieldLens && m_tSchema.GetAttrsCount() && m_tSchema.GetFieldsCount() )
{
// the TOKENCOUNT attributes form a contiguous run; point m_pFieldLengthAttrs at its start
int iFirst = m_tSchema.GetAttrId_FirstFieldLen();
assert ( m_tSchema.GetAttr ( iFirst ).m_eAttrType==SPH_ATTR_TOKENCOUNT );
assert ( m_tSchema.GetAttr ( iFirst+m_tSchema.GetFieldsCount()-1 ).m_eAttrType==SPH_ATTR_TOKENCOUNT );
m_pFieldLengthAttrs = m_tDocInfo.m_pDynamic + ( m_tSchema.GetAttr ( iFirst ).m_tLocator.m_iBitOffset / 32 );
}
}
//////////////////////////////////////////////////////////////////////////
// HIT GENERATORS
//////////////////////////////////////////////////////////////////////////
/// emit synthetic sentence/paragraph/zone hits when the token starts with a magic marker byte
/// returns true if uCode was a marker (and was handled), false for ordinary tokens
bool CSphSource::BuildZoneHits ( RowID_t tRowID, BYTE uCode )
{
switch (uCode)
{
case MAGIC_CODE_SENTENCE:
m_tHits.Add ( { tRowID, m_pDict->GetWordID ( (BYTE *)const_cast<char*>(MAGIC_WORD_SENTENCE) ), m_tState.m_iHitPos } );
m_tState.m_iBuildLastStep = 1;
return true;
case MAGIC_CODE_PARAGRAPH:
// a paragraph boundary implies a sentence boundary too
m_tHits.Add ( { tRowID, m_pDict->GetWordID ( (BYTE *)const_cast<char*>(MAGIC_WORD_SENTENCE) ), m_tState.m_iHitPos } );
m_tHits.Add ( { tRowID, m_pDict->GetWordID ( (BYTE *)const_cast<char*>(MAGIC_WORD_PARAGRAPH) ), m_tState.m_iHitPos } );
m_tState.m_iBuildLastStep = 1;
return true;
case MAGIC_CODE_ZONE:
// a zone boundary implies sentence and paragraph boundaries
m_tHits.Add ( { tRowID, m_pDict->GetWordID ( (BYTE *)const_cast<char*>(MAGIC_WORD_SENTENCE) ), m_tState.m_iHitPos } );
m_tHits.Add ( { tRowID, m_pDict->GetWordID ( (BYTE *)const_cast<char*>(MAGIC_WORD_PARAGRAPH) ), m_tState.m_iHitPos } );
{
// the zone name follows in the tokenizer buffer, terminated by another MAGIC_CODE_ZONE byte;
// emit it as a word and advance the tokenizer past it
BYTE * pZone = (BYTE*)const_cast<char *> ( m_pTokenizer->GetBufferPtr() );
BYTE * pEnd = pZone;
while ( *pEnd && *pEnd!=MAGIC_CODE_ZONE )
++pEnd;
if ( *pEnd==MAGIC_CODE_ZONE )
{
*pEnd = '\0';
m_tHits.Add ( { tRowID, m_pDict->GetWordID ( pZone-1 ), m_tState.m_iHitPos } );
m_pTokenizer->SetBufferPtr ( (const char *) pEnd+1 );
}
}
m_tState.m_iBuildLastStep = 1;
return true;
default:
return false;
}
}
// track blended start and reset on not blended token
// track blended start and reset on not blended token:
// a blended token marks the current hit count as the new start,
// a blended-part token keeps the previous start, anything else clears it
static int TrackBlendedStart ( const TokenizerRefPtr_c& pTokenizer, int iBlendedHitsStart, int iHitsCount )
{
	if ( pTokenizer->TokenIsBlended() )
		return iHitsCount;
	if ( pTokenizer->TokenIsBlendedPart() )
		return iBlendedHitsStart;
	return -1;
}
#define BUILD_SUBSTRING_HITS_COUNT 4
/// tokenize the current field and emit hits for the full word plus all of its
/// prefixes/infixes (dict=crc substring indexing); stops early when m_iMaxHits
/// would be exceeded, leaving m_tState.m_bProcessingHits set for resumption
void CSphSource::BuildSubstringHits ( RowID_t tRowID, bool bPayload, ESphWordpart eWordpart, int & iBlendedHitsStart )
{
bool bPrefixField = ( eWordpart==SPH_WORDPART_PREFIX );
bool bInfixMode = m_iMinInfixLen > 0;
int iMinInfixLen = bPrefixField ? GetMinPrefixLen ( false ) : m_iMinInfixLen;
if ( !m_tState.m_bProcessingHits )
m_tState.m_iBuildLastStep = 1;
BYTE * sWord = NULL;
BYTE sBuf [ 16+3*SPH_MAX_WORD_LEN ];
// worst-case number of hits a single token may produce, used for the batch-limit check
int iIterHitCount = BUILD_SUBSTRING_HITS_COUNT;
if ( bPrefixField )
iIterHitCount += SPH_MAX_WORD_LEN - GetMinPrefixLen ( false );
else
iIterHitCount += ( ( m_iMinInfixLen+SPH_MAX_WORD_LEN ) * ( SPH_MAX_WORD_LEN-m_iMinInfixLen ) / 2 );
// FIELDEND_MASK at blended token stream should be set for HEAD token too
iBlendedHitsStart = -1;
// index all infixes
while ( ( m_iMaxHits==0 || m_tHits.GetLength()+iIterHitCount<m_iMaxHits )
&& ( sWord = m_pTokenizer->GetToken() )!=NULL )
{
int iLastBlendedStart = TrackBlendedStart ( m_pTokenizer, iBlendedHitsStart, m_tHits.GetLength() );
// advance the hit position (payload fields carry explicit positions instead)
if ( !bPayload )
{
HITMAN::AddPos ( &m_tState.m_iHitPos, Max ( m_tState.m_iBuildLastStep + m_pTokenizer->GetOvershortCount()*m_iOvershortStep, 0 ) );
if ( m_pTokenizer->GetBoundary() )
HITMAN::AddPos ( &m_tState.m_iHitPos, m_iBoundaryStep );
m_tState.m_iBuildLastStep = 1;
}
if ( BuildZoneHits ( tRowID, *sWord ) )
continue;
int iLen = m_pTokenizer->GetLastTokenLen ();
// always index full word (with magic head/tail marker(s))
auto iBytes = (int) strlen ( (const char*)sWord );
memcpy ( sBuf + 1, sWord, iBytes );
sBuf[iBytes+1] = '\0';
SphWordID_t uExactWordid = 0;
if ( m_bIndexExactWords )
{
sBuf[0] = MAGIC_WORD_HEAD_NONSTEMMED;
uExactWordid = m_pDict->GetWordIDNonStemmed ( sBuf );
}
sBuf[0] = MAGIC_WORD_HEAD;
// stemmed word w/markers
SphWordID_t iWord = m_pDict->GetWordIDWithMarkers ( sBuf );
if ( !iWord )
{
// stopword: contribute a (configurable) position step but no hits
m_tState.m_iBuildLastStep = m_iStopwordStep;
continue;
}
if ( m_bIndexExactWords )
m_tHits.Add ( { tRowID, uExactWordid, m_tState.m_iHitPos } );
iBlendedHitsStart = iLastBlendedStart;
m_tHits.Add ( { tRowID, iWord, m_tState.m_iHitPos } );
m_tState.m_iBuildLastStep = m_pTokenizer->TokenIsBlended() ? 0 : 1;
// restore stemmed word
auto iStemmedLen = (int) strlen ( ( const char *)sBuf );
sBuf [iStemmedLen - 1] = '\0';
// stemmed word w/o markers
if ( strcmp ( (const char *)sBuf + 1, (const char *)sWord ) )
m_tHits.Add ( { tRowID, m_pDict->GetWordID ( sBuf + 1, iStemmedLen - 2, true ), m_tState.m_iHitPos } );
// restore word
memcpy ( sBuf + 1, sWord, iBytes );
sBuf[iBytes+1] = MAGIC_WORD_TAIL;
sBuf[iBytes+2] = '\0';
// if there are no infixes, that's it
if ( iMinInfixLen > iLen )
{
// index full word
m_tHits.Add ( { tRowID, m_pDict->GetWordID ( sWord ), m_tState.m_iHitPos } );
continue;
}
// process all infixes
// iteration is codepoint-aware: byte offsets advance via GetCodepointLength()
int iMaxStart = bPrefixField ? 0 : ( iLen - iMinInfixLen );
BYTE * sInfix = sBuf + 1;
for ( int iStart=0; iStart<=iMaxStart; iStart++ )
{
BYTE * sInfixEnd = sInfix;
for ( int i = 0; i < iMinInfixLen; i++ )
sInfixEnd += m_pTokenizer->GetCodepointLength ( *sInfixEnd );
int iMaxSubLen = ( iLen-iStart );
if ( m_iMaxSubstringLen )
iMaxSubLen = Min ( m_iMaxSubstringLen, iMaxSubLen );
for ( int i=iMinInfixLen; i<=iMaxSubLen; i++ )
{
m_tHits.Add ( { tRowID, m_pDict->GetWordID ( sInfix, int ( sInfixEnd-sInfix ), false ), m_tState.m_iHitPos } );
// word start: add magic head
if ( bInfixMode && iStart==0 )
m_tHits.Add ( { tRowID, m_pDict->GetWordID ( sInfix - 1, int ( sInfixEnd-sInfix ) + 1, false ), m_tState.m_iHitPos } );
// word end: add magic tail
if ( bInfixMode && i==iLen-iStart )
m_tHits.Add ( { tRowID, m_pDict->GetWordID ( sInfix, int ( sInfixEnd-sInfix ) + 1, false ), m_tState.m_iHitPos } );
sInfixEnd += m_pTokenizer->GetCodepointLength ( *sInfixEnd );
}
sInfix += m_pTokenizer->GetCodepointLength ( *sInfix );
}
}
// a non-NULL sWord here means we bailed on the batch limit, not on end of field
m_tState.m_bProcessingHits = ( sWord!=NULL );
}
#define BUILD_REGULAR_HITS_COUNT 6
/// tokenize the current field and emit one hit per word (plus exact-form and
/// marker variants where configured); stops early when m_iMaxHits would be
/// exceeded, leaving m_tState.m_bProcessingHits set for resumption
void CSphSource::BuildRegularHits ( RowID_t tRowID, bool bPayload, int & iBlendedHitsStart )
{
bool bWordDict = m_pDict->GetSettings().m_bWordDict;
bool bGlobalPartialMatch = !bWordDict && ( GetMinPrefixLen ( bWordDict ) > 0 || m_iMinInfixLen > 0 );
if ( !m_tState.m_bProcessingHits )
m_tState.m_iBuildLastStep = 1;
BYTE * sWord = NULL;
BYTE sBuf [ 16+3*SPH_MAX_WORD_LEN ];
// FIELDEND_MASK at last token stream should be set for HEAD token too
iBlendedHitsStart = -1;
// bMorphDisabled introduced in e0f8754e
bool bMorphDisabled = !m_tMorphFields.BitGetOr ( m_tState.m_iField, true );
// index words only
while ( ( m_iMaxHits==0 || m_tHits.GetLength()+BUILD_REGULAR_HITS_COUNT<m_iMaxHits )
&& ( sWord = m_pTokenizer->GetToken() )!=NULL )
{
int iLastBlendedStart = TrackBlendedStart ( m_pTokenizer, iBlendedHitsStart, m_tHits.GetLength() );
// advance the hit position (payload fields carry explicit positions instead)
if ( !bPayload )
{
HITMAN::AddPos ( &m_tState.m_iHitPos, Max ( m_tState.m_iBuildLastStep + m_pTokenizer->GetOvershortCount()*m_iOvershortStep, 0 ) );
if ( m_pTokenizer->GetBoundary() )
HITMAN::AddPos ( &m_tState.m_iHitPos, m_iBoundaryStep );
}
if ( BuildZoneHits ( tRowID, *sWord ) )
continue;
// crc dict with prefix/infix enabled: also index the marker-wrapped form
if ( bGlobalPartialMatch )
{
auto iBytes = strlen ( (const char*)sWord );
memcpy ( sBuf + 1, sWord, iBytes );
sBuf[0] = MAGIC_WORD_HEAD;
sBuf[iBytes+1] = '\0';
m_tHits.Add ( { tRowID, m_pDict->GetWordIDWithMarkers ( sBuf ), m_tState.m_iHitPos } );
}
ESphTokenMorph eMorph = m_pTokenizer->GetTokenMorph();
if ( m_bIndexExactWords && eMorph != SPH_TOKEN_MORPH_GUESS )
{
// prepare the exact-form (non-stemmed, head-marked) word
auto iBytes = strlen ( (const char*)sWord );
memcpy ( sBuf + 1, sWord, iBytes );
sBuf[0] = MAGIC_WORD_HEAD_NONSTEMMED;
sBuf[iBytes + 1] = '\0';
if ( eMorph == SPH_TOKEN_MORPH_ORIGINAL || bMorphDisabled )
{
// can not use GetWordID here due to exception vs missed hit, ie
// stemmed sWord hasn't got added to hit stream but might be added as exception to dictionary
// that causes error at hit sorting phase \ dictionary HitblockPatch
if ( !m_pDict->GetSettings().m_bStopwordsUnstemmed )
m_pDict->ApplyStemmers ( sWord );
if ( !m_pDict->IsStopWord ( sWord ) )
m_tHits.Add ( { tRowID, m_pDict->GetWordIDNonStemmed ( sBuf ), m_tState.m_iHitPos } );
m_tState.m_iBuildLastStep = m_pTokenizer->TokenIsBlended() ? 0 : 1;
continue;
}
}
SphWordID_t iWord = ( eMorph==SPH_TOKEN_MORPH_GUESS )
? m_pDict->GetWordIDNonStemmed ( sWord ) // tokenizer did morphology => dict must not stem
: m_pDict->GetWordID ( sWord ); // tokenizer did not => stemmers can be applied
if ( iWord )
{
#if 0
if ( HITMAN::GetPos ( m_tState.m_iHitPos )==1 )
printf ( "\n" );
printf ( "doc %d. pos %d. %s\n", uDocid, HITMAN::GetPos ( m_tState.m_iHitPos ), sWord );
#endif
iBlendedHitsStart = iLastBlendedStart;
m_tState.m_iBuildLastStep = m_pTokenizer->TokenIsBlended() ? 0 : 1;
m_tHits.Add ( { tRowID, iWord, m_tState.m_iHitPos } );
if ( m_bIndexExactWords && eMorph!=SPH_TOKEN_MORPH_GUESS )
m_tHits.Add ( { tRowID, m_pDict->GetWordIDNonStemmed ( sBuf ), m_tState.m_iHitPos } );
} else
{
// need to count all blended part tokens to match query
if ( m_pTokenizer->TokenIsBlended() )
m_tState.m_iBuildLastStep = 0;
else
m_tState.m_iBuildLastStep = ( m_pTokenizer->TokenIsBlendedPart() ? 1 : m_iStopwordStep );
}
}
// a non-NULL sWord here means we bailed on the batch limit, not on end of field
m_tState.m_bProcessingHits = ( sWord!=NULL );
}
/// accumulate per-field token counts from a hit stream into pFieldLengthAttrs,
/// counting each position once (hits at non-increasing positions within a field
/// -- blended parts, lemmas, duplicates -- are skipped)
static void CountFieldLengths ( const VecTraits_T<CSphWordHit> & dHits, DWORD * pFieldLengthAttrs )
{
const CSphWordHit * pHit = dHits.Begin();
if ( !pHit )
return;
const CSphWordHit * pEnd = dHits.End();
assert ( pEnd );
Hitpos_t uLastHit = pHit->m_uWordPos;
DWORD uLastCount = 1;
for ( ; pHit!=pEnd; pHit++ )
{
// field changed: flush the running count for the previous field
if ( HITMAN::GetField ( uLastHit )!=HITMAN::GetField ( pHit->m_uWordPos ) )
{
pFieldLengthAttrs [ HITMAN::GetField ( uLastHit ) ] += uLastCount;
uLastCount = 1;
uLastHit = pHit->m_uWordPos;
}
// skip blended part, lemmas and duplicates
if ( HITMAN::GetPos ( pHit->m_uWordPos )>HITMAN::GetPos ( uLastHit ) )
{
uLastHit = pHit->m_uWordPos;
uLastCount++;
}
}
// flush the count for the final field
if ( uLastCount )
{
pFieldLengthAttrs [ HITMAN::GetField ( uLastHit ) ] += uLastCount;
}
}
/// post-process the hits collected for one field: mark the trailing hit(s) with
/// the field-end marker and record the field length; with stopwords enabled the
/// length must be recounted from the full hit stream instead of the last position
static void ProcessCollectedHits ( VecTraits_T<CSphWordHit> & dHits, int iHitsBegin, bool bMarkTail, int iBlendedHitsStart, bool bHasStopwords, DWORD * pFieldLengthAttrs )
{
// mark trailing hit
// and compute field lengths
if ( bMarkTail )
{
auto * pTail = const_cast < CSphWordHit * > ( &dHits.Last() );
// without stopwords the last hit position IS the field length
if ( pFieldLengthAttrs && !bHasStopwords )
pFieldLengthAttrs [ HITMAN::GetField ( pTail->m_uWordPos ) ] = HITMAN::GetPos ( pTail->m_uWordPos );
Hitpos_t uEndPos = pTail->m_uWordPos;
// if the tail is part of a blended token group, the marker must start at the group head
if ( iBlendedHitsStart>=0 )
{
assert ( iBlendedHitsStart>=0 && iBlendedHitsStart<dHits.GetLength() );
Hitpos_t uBlendedPos = dHits[iBlendedHitsStart].m_uWordPos;
uEndPos = Min ( uEndPos, uBlendedPos );
}
// set end marker for all tail hits
const CSphWordHit * pStart = dHits.Begin();
while ( pStart<=pTail && uEndPos<=pTail->m_uWordPos )
{
HITMAN::SetEndMarker ( &pTail->m_uWordPos );
--pTail;
}
}
// for stopwords need to process whole stream of collected tokens
if ( pFieldLengthAttrs && bHasStopwords )
CountFieldLengths ( VecTraits_T<CSphWordHit> ( dHits.Begin()+iHitsBegin, dHits.GetLength()-iHitsBegin ), pFieldLengthAttrs );
}
/// walk the current document's fields from where the state left off, tokenizing
/// each indexed field into m_tHits; pauses (m_bProcessingHits) when the batch
/// limit is reached, and sets m_bDocumentDone when all fields are consumed
/// bSkipEndMarker suppresses tail/field-end processing (used for joined hits)
void CSphSource::BuildHits ( CSphString & sError, bool bSkipEndMarker )
{
RowID_t tRowID = m_tDocInfo.m_tRowID;
for ( ; m_tState.m_iField<m_tState.m_iEndField; m_tState.m_iField++ )
{
// only (re)initialize the tokenizer when not resuming mid-field
if ( !m_tState.m_bProcessingHits )
{
// get that field
BYTE * sField = m_tState.m_dFields[m_tState.m_iField-m_tState.m_iStartField];
int iFieldBytes = m_tState.m_dFieldLengths[m_tState.m_iField-m_tState.m_iStartField];
if ( !sField || !(*sField) || !iFieldBytes )
continue;
// load files
const BYTE * sTextToIndex;
const CSphColumnInfo & tField = m_tSchema.GetField(m_tState.m_iField);
if ( tField.m_bFilename )
{
// the field value is a path: read the file contents and index those instead
LoadFileField ( &sField, sError );
sTextToIndex = sField;
iFieldBytes = (int) strlen ( (char*)sField );
if ( tField.m_uFieldFlags & CSphColumnInfo::FIELD_STORED )
{
m_dDocFields[m_tState.m_iField].Resize(iFieldBytes);
memcpy ( m_dDocFields[m_tState.m_iField].Begin(), sField, iFieldBytes );
}
// file fields skip the filter in IterateDocument, so apply it here
if ( m_pFieldFilter && iFieldBytes )
{
m_tState.m_dFiltered.Resize ( 0 );
int iFiltered = m_pFieldFilter->Apply ( sTextToIndex, iFieldBytes, m_tState.m_dFiltered, false );
if ( iFiltered )
{
sTextToIndex = m_tState.m_dFiltered.Begin();
iFieldBytes = iFiltered;
}
}
} else
sTextToIndex = sField;
if ( iFieldBytes<=0 )
continue;
// strip html
if ( m_pStripper )
{
m_pStripper->Strip ( const_cast<BYTE*>(sTextToIndex) );
iFieldBytes = (int) strlen ( (char*)const_cast<BYTE*>(sTextToIndex) );
}
// tokenize and build hits
m_tStats.m_iTotalBytes += iFieldBytes;
m_pTokenizer->BeginField ( m_tState.m_iField );
m_pTokenizer->SetBuffer ( const_cast<BYTE*> ( sTextToIndex ), iFieldBytes );
m_tState.m_iHitPos = HITMAN::Create ( m_tState.m_iField, m_tState.m_iStartPos );
}
const CSphColumnInfo & tField = m_tSchema.GetField ( m_tState.m_iField );
if ( tField.m_uFieldFlags & CSphColumnInfo::FIELD_INDEXED )
{
int iBlendedHitsStart = -1;
int iHitsBegin = m_tHits.GetLength();
// substring indexing for prefix/infix fields, plain word hits otherwise
if ( tField.m_eWordpart!=SPH_WORDPART_WHOLE )
BuildSubstringHits ( tRowID, tField.m_bPayload, tField.m_eWordpart, iBlendedHitsStart );
else
BuildRegularHits ( tRowID, tField.m_bPayload, iBlendedHitsStart );
ProcessCollectedHits ( m_tHits, iHitsBegin, ( !bSkipEndMarker && !m_tState.m_bProcessingHits && m_tHits.GetLength() ), iBlendedHitsStart, !m_pDict->GetSettings().m_sStopwords.IsEmpty(), m_pFieldLengthAttrs );
}
// batch full mid-field: stop here, resume on the next call
if ( m_tState.m_bProcessingHits )
break;
}
m_tState.m_bDocumentDone = !m_tState.m_bProcessingHits;
}
//////////////////////////////////////////////////////////////////////////
/// access the parsed MVA values for the given attribute index
CSphVector<int64_t> * CSphSource::GetFieldMVA ( int iAttr )
{
return &m_dMvas[iAttr];
}
/// access the current document's string value for the given attribute index
const CSphString & CSphSource::GetStrAttr ( int iAttr )
{
return m_dStrAttrs[iAttr];
}
/// access the current document's numeric value for the given attribute index
SphAttr_t CSphSource::GetAttr ( int iAttr )
{
return m_dAttrs[iAttr];
}
/// expose the stored copies of the current document's fields as non-owning views
void CSphSource::GetDocFields ( CSphVector<VecTraits_T<BYTE>> & dFields )
{
	dFields.Resize ( m_dDocFields.GetLength() );
	for ( int i = 0; i < m_dDocFields.GetLength(); i++ )
	{
		auto & dSrc = m_dDocFields[i];
		dFields[i] = VecTraits_T<BYTE> ( dSrc.Begin(), dSrc.GetLength() );
	}
}
/// scan szValue for integer runs (digits, with '-' treated as part of a number)
/// and append each parsed value to the MVA list of attribute iAttr;
/// runs of 64+ characters are silently ignored
void CSphSource::ParseFieldMVA ( int iAttr, const char * szValue )
{
	if ( !szValue )
		return;

	const int MAX_NUMBER_LEN = 64;
	char szBuf [MAX_NUMBER_LEN];
	const char * pDigit = NULL; // start of the number currently being scanned

	for ( const char * p = szValue; *p; p++ )
	{
		bool bNumChar = ( *p>='0' && *p<='9' ) || *p=='-';
		if ( bNumChar )
		{
			if ( !pDigit )
				pDigit = p;
			continue;
		}

		if ( !pDigit )
			continue;

		// a number just ended; copy it out and parse
		int iLen = int ( p - pDigit );
		if ( iLen < MAX_NUMBER_LEN )
		{
			memcpy ( szBuf, pDigit, iLen );
			szBuf[iLen] = '\0';
			m_dMvas[iAttr].Add ( sphToInt64 ( szBuf ) );
		}
		pDigit = NULL;
	}

	// trailing number: already NUL-terminated by the source string
	if ( pDigit )
		m_dMvas[iAttr].Add ( sphToInt64 ( pDigit ) );
}
| 28,467
|
C++
|
.cpp
| 785
| 33.22293
| 211
| 0.686575
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,975
|
source_odbc.cpp
|
manticoresoftware_manticoresearch/src/indexing_sources/source_odbc.cpp
|
//
// Copyright (c) 2021-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "source_odbc.h"
#include <config_indexer.h>
#include <sqlext.h>
#include <sql.h>
#ifndef ODBC_LIB
#define ODBC_LIB nullptr
#endif
/// resolve the ODBC library path: the ODBC_LIB environment variable
/// takes precedence over the compile-time default
static const char * GET_ODBC_LIB ()
{
	const char * szEnv = getenv ( "ODBC_LIB" );
	return szEnv ? szEnv : ODBC_LIB;
}
// when DL_ODBC is set, the ODBC client library is loaded at runtime and the
// needed entry points are resolved by name into sph_SQL* function pointers;
// otherwise the sph_SQL* names alias the statically linked functions directly
#if DL_ODBC
static decltype(&SQLFreeHandle) sph_SQLFreeHandle = nullptr;
static decltype(&SQLDisconnect) sph_SQLDisconnect = nullptr;
static decltype(&SQLCloseCursor) sph_SQLCloseCursor = nullptr;
static decltype(&SQLGetDiagRec) sph_SQLGetDiagRec = nullptr;
static decltype(&SQLSetEnvAttr) sph_SQLSetEnvAttr = nullptr;
static decltype(&SQLAllocHandle) sph_SQLAllocHandle = nullptr;
static decltype(&SQLFetch) sph_SQLFetch = nullptr;
static decltype(&SQLExecDirect) sph_SQLExecDirect = nullptr;
static decltype(&SQLNumResultCols) sph_SQLNumResultCols = nullptr;
static decltype(&SQLDescribeCol) sph_SQLDescribeCol = nullptr;
static decltype(&SQLBindCol) sph_SQLBindCol = nullptr;
static decltype(&SQLDrivers) sph_SQLDrivers = nullptr;
static decltype(&SQLDriverConnect) sph_SQLDriverConnect = nullptr;
/// load the ODBC shared library and resolve all required symbols
/// NOTE: sFuncs and pFuncs must stay index-aligned
bool InitDynamicOdbc ()
{
const char * sFuncs[] = {"SQLFreeHandle", "SQLDisconnect",
"SQLCloseCursor", "SQLGetDiagRec", "SQLSetEnvAttr", "SQLAllocHandle",
"SQLFetch", "SQLExecDirect", "SQLNumResultCols", "SQLDescribeCol",
"SQLBindCol", "SQLDrivers", "SQLDriverConnect" };
void ** pFuncs[] = {(void**)&sph_SQLFreeHandle, (void**)&sph_SQLDisconnect,
(void**)&sph_SQLCloseCursor, (void**)&sph_SQLGetDiagRec, (void**)&sph_SQLSetEnvAttr,
(void**)&sph_SQLAllocHandle, (void**)&sph_SQLFetch, (void**)&sph_SQLExecDirect,
(void**)&sph_SQLNumResultCols, (void**)&sph_SQLDescribeCol, (void**)&sph_SQLBindCol,
(void**)&sph_SQLDrivers, (void**)&sph_SQLDriverConnect };
static CSphDynamicLibrary dLib ( GET_ODBC_LIB() );
return dLib.LoadSymbols ( sFuncs, pFuncs, sizeof ( pFuncs ) / sizeof ( void ** ) );
}
#else
#define sph_SQLFreeHandle SQLFreeHandle
#define sph_SQLDisconnect SQLDisconnect
#define sph_SQLCloseCursor SQLCloseCursor
#define sph_SQLGetDiagRec SQLGetDiagRec
#define sph_SQLSetEnvAttr SQLSetEnvAttr
#define sph_SQLAllocHandle SQLAllocHandle
#define sph_SQLFetch SQLFetch
#define sph_SQLExecDirect SQLExecDirect
#define sph_SQLNumResultCols SQLNumResultCols
#define sph_SQLDescribeCol SQLDescribeCol
#define sph_SQLBindCol SQLBindCol
#define sph_SQLDrivers SQLDrivers
#define sph_SQLDriverConnect SQLDriverConnect
#define InitDynamicOdbc() (true)
#endif
/// ODBC-backed SQL source: implements the CSphSource_SQL driver hooks
/// on top of the (possibly dynamically loaded) sph_SQL* API
struct CSphSource_ODBC : CSphSource_SQL
{
explicit CSphSource_ODBC ( const char * sName );
bool SetupODBC ( const CSphSourceParams_ODBC & tParams );
protected:
void SqlDismissResult () final;
bool SqlQuery ( const char * sQuery ) final;
bool SqlIsError () final;
const char * SqlError () final;
bool SqlConnect () final;
void SqlDisconnect () final;
int SqlNumFields() final;
bool SqlFetchRow() final;
const char * SqlColumn ( int iIndex ) final;
const char * SqlFieldName ( int iIndex ) final;
DWORD SqlColumnLength ( int iIndex ) final;
/// driver-specific hook invoked after connecting (e.g. overridden by MSSQL)
virtual void OdbcPostConnect () {}
protected:
CSphString m_sOdbcDSN; ///< data source name / connection string
bool m_bWinAuth = false;
bool m_bUnicode = false;
// ODBC handle triple: environment -> connection -> statement
SQLHENV m_hEnv { nullptr };
SQLHDBC m_hDBC { nullptr };
SQLHANDLE m_hStmt { nullptr };
int m_nResultCols = 0; ///< column count of the current result set
CSphString m_sError; ///< last error text captured via GetSqlError()
/// per-column fetch buffer and metadata for the current result set
struct QueryColumn_t
{
CSphVector<char> m_dContents;
CSphVector<char> m_dRaw;
CSphString m_sName;
SQLLEN m_iInd;
int m_iBytes; ///< size of actual data in m_dContents, in bytes
int m_iBufferSize; ///< size of m_dContents and m_dRaw buffers, in bytes
bool m_bUCS2; ///< whether this column needs UCS-2 to UTF-8 translation
bool m_bTruncated; ///< whether data was truncated when fetching rows
};
static const int DEFAULT_COL_SIZE = 1024; ///< default column buffer size
static const int VARCHAR_COL_SIZE = 1048576; ///< default column buffer size for VARCHAR columns
static const int MAX_COL_SIZE = 8*1048576; ///< hard limit on column buffer size
static const int WARN_ROW_SIZE = 32*1048576; ///< warning thresh (NOT a hard limit) on row buffer size
CSphVector<QueryColumn_t> m_dColumns; ///< one entry per result-set column
SmallStringHash_T<int> m_hColBuffers; ///< per-column buffer size overrides, keyed by column name
/// fetch diagnostics for the given handle into m_sError
void GetSqlError ( SQLSMALLINT iHandleType, SQLHANDLE hHandle );
};
// just forwards the source name to the SQL base; real setup happens in SetupODBC()
CSphSource_ODBC::CSphSource_ODBC ( const char * sName )
	: CSphSource_SQL ( sName )
{}
/// drop the current result set and release its statement handle, if any
void CSphSource_ODBC::SqlDismissResult ()
{
	if ( !m_hStmt )
		return;

	sph_SQLCloseCursor ( m_hStmt );
	sph_SQLFreeHandle ( SQL_HANDLE_STMT, m_hStmt );
	m_hStmt = nullptr;
}
#define MS_SQL_BUFFER_GAP 16
bool CSphSource_ODBC::SqlQuery ( const char * sQuery )
{
if ( sph_SQLAllocHandle ( SQL_HANDLE_STMT, m_hDBC, &m_hStmt )==SQL_ERROR )
{
if ( m_tParams.m_bPrintQueries )
fprintf ( stdout, "SQL-QUERY: %s: FAIL (SQLAllocHandle failed)\n", sQuery );
return false;
}
if ( sph_SQLExecDirect ( m_hStmt, (SQLCHAR *)const_cast<char*>(sQuery), SQL_NTS )==SQL_ERROR )
{
GetSqlError ( SQL_HANDLE_STMT, m_hStmt );
if ( m_tParams.m_bPrintQueries )
fprintf ( stdout, "SQL-QUERY: %s: FAIL\n", sQuery );
return false;
}
if ( m_tParams.m_bPrintQueries )
fprintf ( stdout, "SQL-QUERY: %s: ok\n", sQuery );
SQLSMALLINT nCols = 0;
m_nResultCols = 0;
if ( sph_SQLNumResultCols ( m_hStmt, &nCols )==SQL_ERROR )
return false;
m_nResultCols = nCols;
const int MAX_NAME_LEN = 512;
char szColumnName[MAX_NAME_LEN];
m_dColumns.Resize ( m_nResultCols );
int iTotalBuffer = 0;
ARRAY_FOREACH ( i, m_dColumns )
{
QueryColumn_t & tCol = m_dColumns[i];
SQLULEN uColSize = 0;
SQLSMALLINT iNameLen = 0;
SQLSMALLINT iDataType = 0;
if ( sph_SQLDescribeCol ( m_hStmt, (SQLUSMALLINT)(i+1), (SQLCHAR*)szColumnName,
MAX_NAME_LEN, &iNameLen, &iDataType, &uColSize, nullptr, nullptr )==SQL_ERROR )
return false;
tCol.m_sName = szColumnName;
tCol.m_sName.ToLower();
// deduce buffer size
// use a small buffer by default, and a bigger one for varchars
int iBuffLen = DEFAULT_COL_SIZE;
if ( iDataType==SQL_WCHAR || iDataType==SQL_WVARCHAR || iDataType==SQL_WLONGVARCHAR|| iDataType==SQL_VARCHAR )
iBuffLen = VARCHAR_COL_SIZE;
if ( m_hColBuffers ( tCol.m_sName ) )
iBuffLen = m_hColBuffers [ tCol.m_sName ]; // got explicit user override
else if ( uColSize )
iBuffLen = (int) Min ( uColSize+1, (SQLULEN) MAX_COL_SIZE ); // got data from driver
tCol.m_dContents.Resize ( iBuffLen + MS_SQL_BUFFER_GAP );
tCol.m_dRaw.Resize ( iBuffLen + MS_SQL_BUFFER_GAP );
tCol.m_iInd = 0;
tCol.m_iBytes = 0;
tCol.m_iBufferSize = iBuffLen;
tCol.m_bUCS2 = m_bUnicode && ( iDataType==SQL_WCHAR || iDataType==SQL_WVARCHAR || iDataType==SQL_WLONGVARCHAR );
tCol.m_bTruncated = false;
iTotalBuffer += iBuffLen;
if ( sph_SQLBindCol ( m_hStmt, (SQLUSMALLINT)(i+1),
tCol.m_bUCS2 ? SQL_UNICODE : SQL_C_CHAR,
tCol.m_bUCS2 ? tCol.m_dRaw.Begin() : tCol.m_dContents.Begin(),
iBuffLen, &(tCol.m_iInd) )==SQL_ERROR )
return false;
}
if ( iTotalBuffer>WARN_ROW_SIZE )
sphWarn ( "row buffer is over %d bytes; consider revising sql_column_buffers", iTotalBuffer );
return true;
}
/// an error is pending whenever the message buffer is non-empty
bool CSphSource_ODBC::SqlIsError ()
{
	if ( m_sError.IsEmpty () )
		return false;
	return true;
}
/// hand out the last message stored by GetSqlError()
const char * CSphSource_ODBC::SqlError ()
{
	const char * szMessage = m_sError.cstr();
	return szMessage;
}
/// establish the ODBC connection: env handle -> ODBC3 mode -> connection handle -> driver connect
/// on failure the already-allocated handles are left for SqlDisconnect() to free
bool CSphSource_ODBC::SqlConnect ()
{
	// make sure the ODBC client library is loadable (no-op when statically linked)
	if_const ( !InitDynamicOdbc() )
	{
		if ( m_tParams.m_bPrintQueries )
			fprintf ( stdout, "SQL-CONNECT: FAIL (NO ODBC CLIENT LIB, tried %s)\n", GET_ODBC_LIB() );
		return false;
	}

	if ( sph_SQLAllocHandle ( SQL_HANDLE_ENV, nullptr, &m_hEnv )==SQL_ERROR )
	{
		if ( m_tParams.m_bPrintQueries )
			fprintf ( stdout, "SQL-CONNECT: FAIL\n" );
		return false;
	}

	// request ODBC 3.x behavior before allocating the connection handle
	sph_SQLSetEnvAttr ( m_hEnv, SQL_ATTR_ODBC_VERSION, (void*) SQL_OV_ODBC3, SQL_IS_INTEGER );

	if ( sph_SQLAllocHandle ( SQL_HANDLE_DBC, m_hEnv, &m_hDBC )==SQL_ERROR )
	{
		if ( m_tParams.m_bPrintQueries )
			fprintf ( stdout, "SQL-CONNECT: FAIL\n" );
		return false;
	}

	// let subclasses (MSSQL) build/patch the DSN before connecting
	OdbcPostConnect ();

	char szOutConn [2048];
	SQLSMALLINT iOutConn = 0;
	if ( sph_SQLDriverConnect ( m_hDBC, nullptr, (SQLTCHAR*) const_cast<char*>( m_sOdbcDSN.cstr() ), SQL_NTS, (SQLCHAR*)szOutConn, sizeof(szOutConn), &iOutConn, SQL_DRIVER_NOPROMPT )==SQL_ERROR )
	{
		GetSqlError ( SQL_HANDLE_DBC, m_hDBC );
		if ( m_tParams.m_bPrintQueries )
			fprintf ( stdout, "SQL-CONNECT: FAIL\n" );
		return false;
	}

	if ( m_tParams.m_bPrintQueries )
		fprintf ( stdout, "SQL-CONNECT: ok\n" );
	return true;
}
/// tear down statement, connection, and environment handles (in that order)
/// FIX: handles are now reset to nullptr after being freed; previously they were left
/// dangling, so e.g. a later SqlDismissResult() would double-free m_hStmt
void CSphSource_ODBC::SqlDisconnect ()
{
	if ( m_tParams.m_bPrintQueries )
		fprintf ( stdout, "SQL-DISCONNECT\n" );

	if ( m_hStmt!=nullptr )
	{
		sph_SQLFreeHandle ( SQL_HANDLE_STMT, m_hStmt );
		m_hStmt = nullptr;
	}

	if ( m_hDBC )
	{
		sph_SQLDisconnect ( m_hDBC );
		sph_SQLFreeHandle ( SQL_HANDLE_DBC, m_hDBC );
		m_hDBC = nullptr;
	}

	if ( m_hEnv )
	{
		sph_SQLFreeHandle ( SQL_HANDLE_ENV, m_hEnv );
		m_hEnv = nullptr;
	}
}
/// column count of the current result set, or -1 when no statement is open
/// (the count itself is captured by SqlQuery)
int CSphSource_ODBC::SqlNumFields ()
{
	return m_hStmt ? m_nResultCols : -1;
}
/// fetch the next row into the buffers bound by SqlQuery() and fix up per-column byte counts
/// FIX: once a column was flagged truncated, later truncated rows skipped the whole branch
/// and left m_iBytes stale from a previous row; now m_iBytes is set on every truncated row,
/// while the warning is still emitted only once per column
bool CSphSource_ODBC::SqlFetchRow ()
{
	if ( !m_hStmt )
		return false;

	SQLRETURN iRet = sph_SQLFetch ( m_hStmt );
	if ( iRet==SQL_ERROR || iRet==SQL_INVALID_HANDLE || iRet==SQL_NO_DATA )
	{
		// note: SQL_NO_DATA is the normal end of the result set; it also lands here
		GetSqlError ( SQL_HANDLE_STMT, m_hStmt );
		return false;
	}

	ARRAY_FOREACH ( i, m_dColumns )
	{
		QueryColumn_t & tCol = m_dColumns[i];
		switch ( tCol.m_iInd )
		{
			case SQL_NULL_DATA:
				tCol.m_dContents[0] = '\0';
				tCol.m_iBytes = 0;
				break;

			default:
#if _WIN32 // FIXME! support UCS-2 columns on Unix too
				if ( tCol.m_bUCS2 )
				{
					// WideCharToMultiByte should get NULL terminated string
					memset ( tCol.m_dRaw.Begin()+tCol.m_iBufferSize, 0, MS_SQL_BUFFER_GAP );

					int iConv = WideCharToMultiByte ( CP_UTF8, 0, LPCWSTR ( tCol.m_dRaw.Begin() ), (int) tCol.m_iInd/sizeof(WCHAR),
						LPSTR ( tCol.m_dContents.Begin() ), tCol.m_iBufferSize-1, NULL, NULL );

					// on overflow, keep as much converted data as fits
					if ( iConv==0 )
						if ( GetLastError()==ERROR_INSUFFICIENT_BUFFER )
							iConv = tCol.m_iBufferSize-1;

					tCol.m_dContents[iConv] = '\0';
					tCol.m_iBytes = iConv;

				} else
#endif
				{
					if ( tCol.m_iInd>=0 && tCol.m_iInd<tCol.m_iBufferSize )
					{
						// data fetched ok; add trailing zero
						tCol.m_dContents[tCol.m_iInd] = '\0';
						tCol.m_iBytes = (int)tCol.m_iInd;

					} else if ( tCol.m_iInd>=tCol.m_iBufferSize )
					{
						// out of buffer; warn about that (once per column)
						if ( !tCol.m_bTruncated )
						{
							tCol.m_bTruncated = true;
							sphWarn ( "'%s' column truncated (buffer=%d, got=%d); consider revising sql_column_buffers",
								tCol.m_sName.cstr(), tCol.m_iBufferSize-1, (int) tCol.m_iInd );
						}
						tCol.m_iBytes = tCol.m_iBufferSize;
					}
				}
			break;
		}
	}

	return iRet!=SQL_NO_DATA;
}
/// pointer to the fetched (already converted/terminated) contents of a column
const char * CSphSource_ODBC::SqlColumn ( int iIndex )
{
	if ( !m_hStmt )
		return nullptr;

	return m_dColumns[iIndex].m_dContents.Begin();
}
const char * CSphSource_ODBC::SqlFieldName ( int iIndex )
{
return m_dColumns[iIndex].m_sName.cstr();
}
/// number of data bytes currently stored in the column's contents buffer
DWORD CSphSource_ODBC::SqlColumnLength ( int iIndex )
{
	const QueryColumn_t & tCol = m_dColumns[iIndex];
	return tCol.m_iBytes;
}
/// store ODBC-specific params and parse the sql_column_buffers spec
/// the spec is a comma-separated list of "name = size[K|M]" entries
bool CSphSource_ODBC::SetupODBC ( const CSphSourceParams_ODBC & tParams )
{
	if ( !CSphSource_SQL::SetupSQL ( tParams ) )
		return false;

	// parse column buffers spec, if any
	if ( !tParams.m_sColBuffers.IsEmpty() )
	{
		const char * p = tParams.m_sColBuffers.cstr();
		while ( *p )
		{
			// skip space
			while ( sphIsSpace(*p) )
				p++;

			// expect eof or ident
			if ( !*p )
				break;
			if ( !sphIsAlpha(*p) )
			{
				m_sError.SetSprintf ( "identifier expected in sql_column_buffers near '%s'", p );
				return false;
			}

			// get ident
			CSphString sCol;
			const char * pIdent = p;
			while ( sphIsAlpha(*p) )
				++p;
			sCol.SetBinary ( pIdent, int ( p-pIdent ) );

			// skip space
			while ( sphIsSpace(*p) )
				++p;

			// expect assignment
			if ( *p!='=' )
			{
				m_sError.SetSprintf ( "'=' expected in sql_column_buffers near '%s'", p );
				return false;
			}
			++p;

			// skip space
			while ( sphIsSpace(*p) )
				++p;

			// expect number
			if (!( *p>='0' && *p<='9' ))
			{
				m_sError.SetSprintf ( "number expected in sql_column_buffers near '%s'", p );
				return false;
			}

			// get value
			int iSize = 0;
			while ( *p>='0' && *p<='9' )
			{
				iSize = 10*iSize + ( *p-'0' );
				p++;
			}

			// optional K/M suffix scales the size by 1024 / 1048576
			if ( *p=='K' )
			{
				iSize *= 1024;
				p++;
			} else if ( *p=='M' )
			{
				iSize *= 1048576;
				p++;
			}

			// hash value
			// lowercased to match the names stored by SqlQuery()
			sCol.ToLower();
			m_hColBuffers.Add ( iSize, sCol );

			// skip space
			while ( sphIsSpace(*p) )
				p++;

			// expect eof or comma
			if ( !*p )
				break;
			if ( *p!=',' )
			{
				m_sError.SetSprintf ( "comma expected in sql_column_buffers near '%s'", p );
				return false;
			}
			p++;
		}
	}

	// ODBC specific params
	m_sOdbcDSN = tParams.m_sOdbcDSN;
	m_bWinAuth = tParams.m_bWinAuth;

	// build and store DSN for error reporting
	// NOTE(review): assumes the m_sSqlDSN built by SetupSQL() starts with a 3-char
	// prefix that gets replaced with "odbc" -- confirm against CSphSource_SQL::SetupSQL
	char sBuf [ 1024 ];
	snprintf ( sBuf, sizeof(sBuf), "odbc%s", m_sSqlDSN.cstr()+3 );
	m_sSqlDSN = sBuf;

	return true;
}
/// pull the first diagnostic record for the given handle into m_sError
void CSphSource_ODBC::GetSqlError ( SQLSMALLINT iHandleType, SQLHANDLE hHandle )
{
	if ( !hHandle )
	{
		m_sError = "invalid handle";
		return;
	}

	char szState[16] = "";
	char szMessageText[1024] = "";
	SQLINTEGER iNativeError = 0;
	SQLSMALLINT iMessageLen = 0;
	sph_SQLGetDiagRec ( iHandleType, hHandle, 1, (SQLCHAR*)szState, &iNativeError, (SQLCHAR*)szMessageText, sizeof(szMessageText), &iMessageLen );
	m_sError = szMessageText;
}
//////////////////////////////////////////////////////////////////////////
/// MS SQL source implementation
/// MS SQL flavor of the ODBC source: enables UCS-2 column handling and
/// auto-builds a DSN from host/user/pass/db when none is given explicitly
struct CSphSource_MSSQL : public CSphSource_ODBC
{
	explicit CSphSource_MSSQL ( const char * sName ) : CSphSource_ODBC ( sName ) { m_bUnicode=true; }
	void OdbcPostConnect () final;
};
/// build a DSN for SQLDriverConnect when the config did not supply one
void CSphSource_MSSQL::OdbcPostConnect ()
{
	// explicit DSN wins; nothing to do then
	if ( !m_sOdbcDSN.IsEmpty() )
		return;

	const int MAX_LEN = 1024;
	char szDriver[MAX_LEN];
	char szDriverAttrs[MAX_LEN];
	SQLSMALLINT iDescLen = 0;
	SQLSMALLINT iAttrLen = 0;
	SQLSMALLINT iDir = SQL_FETCH_FIRST;

	// enumerate installed drivers, preferring the native client when present
	CSphString sDriver;
	while (true)
	{
		SQLRETURN iRet = sph_SQLDrivers ( m_hEnv, iDir, (SQLCHAR*)szDriver, MAX_LEN, &iDescLen, (SQLCHAR*)szDriverAttrs, MAX_LEN, &iAttrLen );
		if ( iRet==SQL_NO_DATA )
			break;

		iDir = SQL_FETCH_NEXT;
		if ( !strcmp ( szDriver, "SQL Native Client" )
			|| !strncmp ( szDriver, "SQL Server Native Client", strlen("SQL Server Native Client") ) )
		{
			sDriver = szDriver;
			break;
		}
	}

	// fall back to the generic driver name
	if ( sDriver.IsEmpty() )
		sDriver = "SQL Server";

	// three DSN flavors: trusted connection without credentials, trusted with credentials, and plain
	if ( m_bWinAuth && m_tParams.m_sUser.IsEmpty () )
	{
		m_sOdbcDSN.SetSprintf ( "DRIVER={%s};SERVER={%s};Database={%s};Trusted_Connection=yes",
			sDriver.cstr (), m_tParams.m_sHost.cstr (), m_tParams.m_sDB.cstr () );
	} else if ( m_bWinAuth )
	{
		m_sOdbcDSN.SetSprintf ( "DRIVER={%s};SERVER={%s};UID={%s};PWD={%s};Database={%s};Trusted_Connection=yes",
			sDriver.cstr (), m_tParams.m_sHost.cstr (), m_tParams.m_sUser.cstr (), m_tParams.m_sPass.cstr (), m_tParams.m_sDB.cstr () );
	} else
	{
		m_sOdbcDSN.SetSprintf ( "DRIVER={%s};SERVER={%s};UID={%s};PWD={%s};Database={%s}",
			sDriver.cstr (), m_tParams.m_sHost.cstr (), m_tParams.m_sUser.cstr (), m_tParams.m_sPass.cstr (), m_tParams.m_sDB.cstr () );
	}
}
// the fabrics
/// factory: build an ODBC source, or return null when setup fails
CSphSource * CreateSourceODBC ( const CSphSourceParams_ODBC & tParams, const char * sSourceName )
{
	auto * pSource = new CSphSource_ODBC ( sSourceName );
	if ( pSource->SetupODBC ( tParams ) )
		return pSource;

	// setup failed: destroy the instance; SafeDelete() also resets the pointer
	SafeDelete ( pSource );
	return pSource;
}
/// factory: build an MSSQL source, or return null when setup fails
CSphSource * CreateSourceMSSQL ( const CSphSourceParams_ODBC & tParams, const char * sSourceName )
{
	auto * pSource = new CSphSource_MSSQL ( sSourceName );
	if ( pSource->SetupODBC ( tParams ) )
		return pSource;

	// setup failed: destroy the instance; SafeDelete() also resets the pointer
	SafeDelete ( pSource );
	return pSource;
}
| 16,073
|
C++
|
.cpp
| 486
| 30.117284
| 192
| 0.682279
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,976
|
source_xmlpipe2.cpp
|
manticoresoftware_manticoresearch/src/indexing_sources/source_xmlpipe2.cpp
|
//
// Copyright (c) 2021-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include <config_indexer.h>
#include "source_xmlpipe2.h"
#include "indexcheck.h"
#include "schema_configurator.h"
#include "attribute.h"
#include "conversion.h"
#include "dict/dict_base.h"
#define XMLIMPORT
#include "expat.h"
// workaround for expat versions prior to 1.95.7
#ifndef XMLCALL
#define XMLCALL
#endif
#if WITH_ICONV
#include "iconv.h"
#endif
/////////////////////////////////////////////////////////////////////////////
// XMLPIPE (v2)
/////////////////////////////////////////////////////////////////////////////
#ifndef EXPAT_LIB
#define EXPAT_LIB nullptr
#endif
/// expat library path: a runtime override via the environment wins over the compiled-in default
static const char * GET_EXPAT_LIB ()
{
	const char * szOverride = getenv ( "EXPAT_LIB" );
	return szOverride ? szOverride : EXPAT_LIB;
}
#ifndef ICONV_LIB
#define ICONV_LIB nullptr
#endif
/// iconv library path: a runtime override via the environment wins over the compiled-in default
static const char * GET_ICONV_LIB ()
{
	const char * szOverride = getenv ( "ICONV_LIB" );
	return szOverride ? szOverride : ICONV_LIB;
}
#if DL_EXPAT
static decltype (&XML_ParserFree) sph_XML_ParserFree = nullptr;
static decltype (&XML_Parse) sph_XML_Parse = nullptr;
static decltype (&XML_GetCurrentColumnNumber) sph_XML_GetCurrentColumnNumber = nullptr;
static decltype (&XML_GetCurrentLineNumber) sph_XML_GetCurrentLineNumber = nullptr;
static decltype (&XML_GetErrorCode) sph_XML_GetErrorCode = nullptr;
static decltype (&XML_ErrorString) sph_XML_ErrorString = nullptr;
static decltype (&XML_ParserCreate) sph_XML_ParserCreate = nullptr;
static decltype (&XML_SetUserData) sph_XML_SetUserData = nullptr;
static decltype (&XML_SetElementHandler) sph_XML_SetElementHandler = nullptr;
static decltype (&XML_SetCharacterDataHandler) sph_XML_SetCharacterDataHandler = nullptr;
static decltype (&XML_SetUnknownEncodingHandler) sph_XML_SetUnknownEncodingHandler = nullptr;
/// resolve the expat entry points from the shared library into the sph_XML_* pointers
static bool InitDynamicExpat ()
{
	const char * dSymbols[] = { "XML_ParserFree", "XML_Parse",
		"XML_GetCurrentColumnNumber", "XML_GetCurrentLineNumber", "XML_GetErrorCode", "XML_ErrorString",
		"XML_ParserCreate", "XML_SetUserData", "XML_SetElementHandler", "XML_SetCharacterDataHandler",
		"XML_SetUnknownEncodingHandler" };

	void ** dTargets[] = { (void **) & sph_XML_ParserFree, (void **) & sph_XML_Parse,
		(void **) & sph_XML_GetCurrentColumnNumber, (void **) & sph_XML_GetCurrentLineNumber,
		(void **) & sph_XML_GetErrorCode, (void **) & sph_XML_ErrorString,
		(void **) & sph_XML_ParserCreate, (void **) & sph_XML_SetUserData,
		(void **) & sph_XML_SetElementHandler, (void **) & sph_XML_SetCharacterDataHandler,
		(void **) & sph_XML_SetUnknownEncodingHandler };

	static CSphDynamicLibrary dLib ( GET_EXPAT_LIB() );
	const int iCount = int ( sizeof ( dTargets ) / sizeof ( dTargets[0] ) );
	return dLib.LoadSymbols ( dSymbols, dTargets, iCount );
}
#else
#define sph_XML_ParserFree XML_ParserFree
#define sph_XML_Parse XML_Parse
#define sph_XML_GetCurrentColumnNumber XML_GetCurrentColumnNumber
#define sph_XML_GetCurrentLineNumber XML_GetCurrentLineNumber
#define sph_XML_GetErrorCode XML_GetErrorCode
#define sph_XML_ErrorString XML_ErrorString
#define sph_XML_ParserCreate XML_ParserCreate
#define sph_XML_SetUserData XML_SetUserData
#define sph_XML_SetElementHandler XML_SetElementHandler
#define sph_XML_SetCharacterDataHandler XML_SetCharacterDataHandler
#define sph_XML_SetUnknownEncodingHandler XML_SetUnknownEncodingHandler
#define InitDynamicExpat() (true)
#endif
#if WITH_ICONV
#if DL_ICONV
static decltype (&iconv) sph_iconv = nullptr;
static decltype (&iconv_close) sph_iconv_close = nullptr;
static decltype (&iconv_open) sph_iconv_open = nullptr;
/// resolve the iconv entry points from the shared library into the sph_iconv* pointers
static bool InitDynamicIconv ()
{
	const char * dSymbols[] = { "iconv", "iconv_close", "iconv_open" };
	void ** dTargets[] = { (void **) & sph_iconv, (void **) & sph_iconv_close, (void **) & sph_iconv_open, };
	static CSphDynamicLibrary dLib ( GET_ICONV_LIB() );
	const int iCount = int ( sizeof ( dTargets ) / sizeof ( dTargets[0] ) );
	return dLib.LoadSymbols ( dSymbols, dTargets, iCount );
}
#else
#define sph_iconv iconv
#define sph_iconv_close iconv_close
#define sph_iconv_open iconv_open
#define InitDynamicIconv() (true)
#endif
#else
#define InitDynamicIconv() (true)
#endif
/// XML pipe source implementation (v2)
/// XML pipe source implementation (v2)
/// reads a sphinx:docset stream from a pipe, parses it with expat, and queues
/// parsed documents for NextDocument() to hand out
class CSphSource_XMLPipe2 final : public CSphSource, public CSphSchemaConfigurator<CSphSource_XMLPipe2>
{
public:
	explicit			CSphSource_XMLPipe2 ( const char * sName );
					~CSphSource_XMLPipe2 () final;

	bool			SetupXML ( int iFieldBufferMax, bool bFixupUTF8, FILE * pPipe, const CSphConfigSection & hSource, CSphString & sError );			///< memorize the command
	bool			Connect ( CSphString & sError ) final;			///< run the command and open the pipe
	void			Disconnect () final;								///< close the pipe

	bool	IterateStart ( CSphString & ) final { m_iPlainFieldsLength = m_tSchema.GetFieldsCount(); return true; }	///< Connect() starts getting documents automatically, so this one is empty
	BYTE **	NextDocument ( bool & bEOF, CSphString & sError ) final;			///< parse incoming chunk and emit some hits
	const int *	GetFieldLengths () const final { return m_dFieldLengths.Begin(); }

	bool	IterateMultivaluedStart ( int, CSphString & ) final	{ return false; }
	bool	IterateMultivaluedNext ( int64_t & iDocID, int64_t & iMvaValue ) final	{ return false; }
	bool	IterateKillListStart ( CSphString & ) final;
	bool	IterateKillListNext ( DocID_t & tDocId ) final;

	// expat callback entry points (invoked via the static xml* trampolines below)
	void	StartElement ( const char * szName, const char ** pAttrs );
	void	EndElement ( const char * szName );
	void	Characters ( const char * pCharacters, int iLen );

	void	Error ( const char * sTemplate, ... ) __attribute__ ( ( format ( printf, 2, 3 ) ) );
	const char * DecorateMessage ( const char * sTemplate, ... ) const __attribute__ ( ( format ( printf, 2, 3 ) ) );
	const char * DecorateMessageVA ( const char * sTemplate, va_list ap ) const;

private:
	/// one fully parsed <sphinx:document>, queued until NextDocument() consumes it
	struct Document_t
	{
		DocID_t					m_tDocID;
		CSphVector < CSphVector<BYTE> >	m_dFields;
		StrVec_t					m_dAttrs;
	};

	Document_t *				m_pCurDocument = nullptr;	// document currently being filled by the parser callbacks
	CSphVector<Document_t *>	m_dParsedDocuments;			// parsed-and-ready queue, consumed front-first

	FILE *			m_pPipe = nullptr;			///< incoming stream
	CSphString		m_sError;
	CSphString		m_sDocIDError;
	StrVec_t		m_dDefaultAttrs;			// per-attr default values from the embedded schema
	StrVec_t		m_dInvalid;					// unknown element names we already complained about
	StrVec_t		m_dWarned;
	int				m_iElementDepth = 0;

	BYTE *			m_pBuffer = nullptr;		// raw read buffer fed to expat
	int				m_iBufferSize = 1048576;

	CSphVector<BYTE*>	m_dFieldPtrs;			// per-field pointers/lengths returned by NextDocument()
	CSphVector<int>		m_dFieldLengths;
	bool			m_bRemoveParsed = false;	// front document was handed out, drop it on next call

	// parser state flags: which element we are currently inside of
	bool			m_bInDocset = false;
	bool			m_bInSchema = false;
	bool			m_bInDocument = false;
	bool			m_bInKillList = false;
	bool			m_bInId = false;
	bool			m_bInIgnoredTag = false;
	bool			m_bFirstTagAfterDocset = false;

	int				m_iKillListIterator = 0;	// cursor for IterateKillListNext()
	CSphVector<DocID_t> m_dKillList;

	int				m_iCurField = -1;			// schema index of the field/attr being parsed, -1 when none
	int				m_iCurAttr = -1;

	XML_Parser		m_pParser {nullptr};

	int				m_iFieldBufferMax = 65536;
	BYTE * 			m_pFieldBuffer = nullptr;	// accumulates character data for the current field
	int				m_iFieldBufferLen = 0;

	bool			m_bFixupUTF8 = false;		///< whether to replace invalid utf-8 codepoints with spaces
	int				m_iReparseStart = 0;		///< utf-8 fixerupper might need to postpone a few bytes, starting at this offset
	int				m_iReparseLen = 0;			///< and this much bytes (under 4)

	void UnexpectedCharaters ( const char * pCharacters, int iLen, const char * szComment );

	/// feed one buffer of raw bytes to expat (after optional UTF-8 fixup)
	bool ParseNextChunk ( int iBufferLen, CSphString & sError );

	void DocumentError ( const char * sWhere )
	{
		Error ( "malformed source, <sphinx:document> found inside %s", sWhere );

		// Ideally I'd like to display a notice on the next line that
		// would say where exactly it's allowed. E.g.:
		//
		// <sphinx:document> must be contained in <sphinx:docset>
	}
};
// callbacks
// trampoline: expat hands back the user_data registered via sph_XML_SetUserData(),
// which is the source object itself
static void XMLCALL xmlStartElement ( void * user_data, const XML_Char * name, const XML_Char ** attrs )
{
	((CSphSource_XMLPipe2 *) user_data)->StartElement ( name, attrs );
}
// trampoline: forward the end-element event to the source object
static void XMLCALL xmlEndElement ( void * user_data, const XML_Char * name )
{
	((CSphSource_XMLPipe2 *) user_data)->EndElement ( name );
}
// trampoline: forward character data to the source object
static void XMLCALL xmlCharacters ( void * user_data, const XML_Char * ch, int len )
{
	((CSphSource_XMLPipe2 *) user_data)->Characters ( ch, len );
}
#if WITH_ICONV
/// expat unknown-encoding hook: build a byte->UTF-16 map for single-byte encodings via iconv
/// FIX: iconv_open() signals failure by returning (iconv_t)-1, NOT a null pointer;
/// the old "if (!pDesc)" check never fired, so a failed descriptor was used and closed
static int XMLCALL xmlUnknownEncoding ( void *, const XML_Char * name, XML_Encoding * info )
{
	iconv_t pDesc = sph_iconv_open ( "UTF-16", name );
	if ( pDesc==(iconv_t)-1 )
		return XML_STATUS_ERROR;

	// convert each possible input byte; unconvertible bytes map to 0
	for ( size_t i = 0; i < 256; i++ )
	{
		char cIn = (char) i;
		char dOut[4];
		memset ( dOut, 0, sizeof ( dOut ) );
#if ICONV_INBUF_CONST
		const
#endif
		char * pInbuf = &cIn;
		char * pOutbuf = dOut;
		size_t iInBytesLeft = 1;
		size_t iOutBytesLeft = 4;

		if ( sph_iconv ( pDesc, &pInbuf, &iInBytesLeft, &pOutbuf, &iOutBytesLeft )!=size_t(-1) )
			info->map[i] = int ( BYTE ( dOut[0] ) ) << 8 | int ( BYTE ( dOut[1] ) );
		else
			info->map[i] = 0;
	}

	sph_iconv_close ( pDesc );

	return XML_STATUS_OK;
}
#endif
// just forwards the source name; buffers and the parser are set up in SetupXML()/Connect()
CSphSource_XMLPipe2::CSphSource_XMLPipe2 ( const char * sName )
	: CSphSource ( sName )
{}
// shut the pipe/parser down first, then release the buffers and any documents still queued
CSphSource_XMLPipe2::~CSphSource_XMLPipe2 ()
{
	Disconnect ();
	SafeDeleteArray ( m_pBuffer );
	SafeDeleteArray ( m_pFieldBuffer );
	ARRAY_FOREACH ( iDoc, m_dParsedDocuments )
		SafeDelete ( m_dParsedDocuments[iDoc] );
}
/// close the incoming pipe, free the expat parser, and drop accumulated hits
void CSphSource_XMLPipe2::Disconnect ()
{
	if ( m_pPipe )
	{
		pclose ( m_pPipe );
		m_pPipe = nullptr;
	}

	if ( m_pParser )
	{
		sph_XML_ParserFree ( m_pParser );
		m_pParser = nullptr;
	}

	m_tHits.Reset();
}
void CSphSource_XMLPipe2::Error ( const char * sTemplate, ... )
{
if ( !m_sError.IsEmpty() )
return;
va_list ap;
va_start ( ap, sTemplate );
m_sError = DecorateMessageVA ( sTemplate, ap );
va_end ( ap );
}
/// varargs convenience wrapper around DecorateMessageVA()
const char * CSphSource_XMLPipe2::DecorateMessage ( const char * sTemplate, ... ) const
{
	va_list ap;
	va_start ( ap, sTemplate );
	const char * szDecorated = DecorateMessageVA ( sTemplate, ap );
	va_end ( ap );
	return szDecorated;
}
/// format "source 'name': <message> (line, pos, docid)" into a static buffer
/// NOTE: returns a pointer into a function-local static buffer; not reentrant
const char * CSphSource_XMLPipe2::DecorateMessageVA ( const char * sTemplate, va_list ap ) const
{
	static char sBuf[1024];

	// prefix with the source name
	snprintf ( sBuf, sizeof(sBuf), "source '%s': ", m_tSchema.GetName() );
	auto iBufLen = strlen ( sBuf );
	auto iLeft = sizeof(sBuf) - iBufLen;
	char * szBufStart = sBuf + iBufLen;

	// append the caller-supplied message
	vsnprintf ( szBufStart, iLeft, sTemplate, ap );
	iBufLen = strlen ( sBuf );
	iLeft = sizeof(sBuf) - iBufLen;
	szBufStart = sBuf + iBufLen;

	// when a parser is active, append position info and the docid of the last parsed document
	if ( m_pParser )
	{
		DocID_t tFailedID = 0;
		if ( m_dParsedDocuments.GetLength() )
			tFailedID = m_dParsedDocuments.Last()->m_tDocID;

		snprintf ( szBufStart, iLeft, " (line=%d, pos=%d, docid=" INT64_FMT ")",
			(int)sph_XML_GetCurrentLineNumber ( m_pParser ), (int)sph_XML_GetCurrentColumnNumber ( m_pParser ),
			tFailedID );
	}

	return sBuf;
}
/// allocate the read/field buffers and build the configured (non-embedded) schema
/// note: an embedded <sphinx:schema> encountered later overrides this one (see StartElement)
bool CSphSource_XMLPipe2::SetupXML ( int iFieldBufferMax, bool bFixupUTF8, FILE * pPipe, const CSphConfigSection & hSource, CSphString & sError )
{
	assert ( !m_pBuffer && !m_pFieldBuffer && !m_pPipe );

	m_pBuffer = new BYTE [m_iBufferSize];
	m_iFieldBufferMax = Max ( iFieldBufferMax, 65536 );	// enforce a 64K floor on the field buffer
	m_pFieldBuffer = new BYTE [ m_iFieldBufferMax+1 ]; // safe gap for tail zero
	m_bFixupUTF8 = bFixupUTF8;
	m_pPipe = pPipe;
	m_tSchema.Reset ();
	bool bWordDict = ( m_pDict && m_pDict->GetSettings().m_bWordDict );

	// pick up every xmlpipe_attr_* / xmlpipe_field_string directive from the config;
	// all of them must succeed for setup to proceed
	bool bOk = true;
	bOk &= ConfigureAttrs ( hSource("xmlpipe_attr_uint"),		SPH_ATTR_INTEGER,	m_tSchema, sError );
	bOk &= ConfigureAttrs ( hSource("xmlpipe_attr_timestamp"),	SPH_ATTR_TIMESTAMP,	m_tSchema, sError );
	bOk &= ConfigureAttrs ( hSource("xmlpipe_attr_bool"),		SPH_ATTR_BOOL,		m_tSchema, sError );
	bOk &= ConfigureAttrs ( hSource("xmlpipe_attr_float"),		SPH_ATTR_FLOAT,		m_tSchema, sError );
	bOk &= ConfigureAttrs ( hSource("xmlpipe_attr_bigint"),		SPH_ATTR_BIGINT,	m_tSchema, sError );
	bOk &= ConfigureAttrs ( hSource("xmlpipe_attr_multi"),		SPH_ATTR_UINT32SET,	m_tSchema, sError );
	bOk &= ConfigureAttrs ( hSource("xmlpipe_attr_multi_64"),	SPH_ATTR_INT64SET,	m_tSchema, sError );
	bOk &= ConfigureAttrs ( hSource("xmlpipe_attr_string"),		SPH_ATTR_STRING,	m_tSchema, sError );
	bOk &= ConfigureAttrs ( hSource("xmlpipe_attr_json"),		SPH_ATTR_JSON,		m_tSchema, sError );

	// xmlpipe_field_string entries double as both a string attribute and a field
	bOk &= ConfigureAttrs ( hSource("xmlpipe_field_string"),	SPH_ATTR_STRING,	m_tSchema, sError );

	if ( !bOk )
		return false;

	if ( !DebugCheckSchema ( m_tSchema, sError ) )
		return false;

	ConfigureFields ( hSource("xmlpipe_field"), bWordDict, m_tSchema );
	ConfigureFields ( hSource("xmlpipe_field_string"), bWordDict, m_tSchema );

	AllocDocinfo();
	return true;
}
/// load expat/iconv, create the parser, reset all parse state, and parse the first chunk
bool CSphSource_XMLPipe2::Connect ( CSphString & sError )
{
	assert ( m_pBuffer && m_pFieldBuffer );

	// source settings have been updated after ::Setup
	for ( int i = 0; i < m_tSchema.GetFieldsCount(); ++i )
	{
		ESphWordpart eWordpart = GetWordpart ( m_tSchema.GetFieldName(i), m_pDict && m_pDict->GetSettings().m_bWordDict );
		m_tSchema.SetFieldWordpart ( i, eWordpart );
	}

	// both are no-ops when the libraries are statically linked
	if_const ( !InitDynamicExpat() )
	{
		sError.SetSprintf ( "xmlpipe: failed to load libexpat library (tried %s)\n", GET_EXPAT_LIB() );
		return false;
	}

	if_const ( !InitDynamicIconv() )
	{
		sError.SetSprintf ( "xmlpipe: failed to load iconv library (tried %s)\n", GET_ICONV_LIB() );
		return false;
	}

	if ( !AddAutoAttrs ( sError ) )
		return false;
	AllocDocinfo();

	m_pParser = sph_XML_ParserCreate(nullptr);
	if ( !m_pParser )
	{
		sError.SetSprintf ( "xmlpipe: failed to create XML parser" );
		return false;
	}

	// register this object as the callback context (see the xml* trampolines above)
	sph_XML_SetUserData ( m_pParser, this );
	sph_XML_SetElementHandler ( m_pParser, xmlStartElement, xmlEndElement );
	sph_XML_SetCharacterDataHandler ( m_pParser, xmlCharacters );

#if WITH_ICONV
	sph_XML_SetUnknownEncodingHandler ( m_pParser, xmlUnknownEncoding, nullptr );
#endif

	// reset all per-connection parse state
	m_dKillList.Reserve ( 1024 );
	m_dKillList.Resize ( 0 );

	m_bRemoveParsed = false;
	m_bInDocset = false;
	m_bInSchema = false;
	m_bInDocument = false;
	m_bInKillList = false;
	m_bInId = false;
	m_bFirstTagAfterDocset = false;
	m_iCurField = -1;
	m_iCurAttr = -1;
	m_iElementDepth = 0;

	m_dParsedDocuments.Reset ();
	m_dDefaultAttrs.Reset ();
	m_dInvalid.Reset ();
	m_dWarned.Reset ();

	m_dParsedDocuments.Reserve ( 1024 );
	m_dParsedDocuments.Resize ( 0 );

	m_iKillListIterator = 0;
	m_sError = "";

	// parse the first buffer right away; this is where the embedded schema (if any) comes in
	auto iBytesRead = (int) fread ( m_pBuffer, 1, m_iBufferSize, m_pPipe );

	if ( !ParseNextChunk ( iBytesRead, sError ) )
		return false;

	m_tHits.Reserve ( m_iMaxHits );

	return true;
}
/// feed one buffer to expat; optionally scrub invalid UTF-8 in place first
/// a multi-byte sequence cut at the buffer boundary is held back (m_iReparseStart/Len)
/// and prepended to the next read by NextDocument()
bool CSphSource_XMLPipe2::ParseNextChunk ( int iBufferLen, CSphString & sError )
{
	if ( !iBufferLen )
		return true;

	// a short read means we hit EOF, i.e. this is the final chunk
	bool bLast = ( iBufferLen!=m_iBufferSize );

	m_iReparseLen = 0;
	if ( m_bFixupUTF8 )
	{
		BYTE * p = m_pBuffer;
		BYTE * pMax = m_pBuffer + iBufferLen;

		while ( p<pMax )
		{
			BYTE v = *p;

			// fix control codes
			if ( v<0x20 && v!=0x0D && v!=0x0A )
			{
				*p++ = ' ';
				continue;
			}

			// accept ascii7 codes
			if ( v<128 )
			{
				p++;
				continue;
			}

			// remove invalid start bytes
			if ( v<0xC2 )
			{
				*p++ = ' ';
				continue;
			}

			// get and check byte count
			// (count the leading 1-bits of the start byte)
			int iBytes = 0;
			while ( v & 0x80 )
			{
				iBytes++;
				v <<= 1;
			}
			if ( iBytes<2 || iBytes>3 )
			{
				*p++ = ' ';
				continue;
			}

			// if we're on a boundary, save these few bytes for the future
			if ( p+iBytes>pMax )
			{
				m_iReparseStart = (int)(p-m_pBuffer);
				m_iReparseLen = (int)(pMax-p);
				iBufferLen -= m_iReparseLen;
				break;
			}

			// otherwise (not a boundary), check them all
			// decode the codepoint while validating continuation bytes
			int i = 1;
			int iVal = ( v >> iBytes );
			for ( ; i<iBytes; i++ )
			{
				if ( ( p[i] & 0xC0 )!=0x80 )
					break;
				iVal = ( iVal<<6 ) + ( p[i] & 0x3f );
			}

			if ( i!=iBytes // remove invalid sequences
				|| ( iVal>=0xd800 && iVal<=0xdfff ) // and utf-16 surrogate pairs
				|| ( iBytes==3 && iVal<0x800 ) // and overlong 3-byte codes
				|| ( iVal>=0xfff0 && iVal<=0xffff ) ) // and kinda-valid specials expat chokes on anyway
			{
				iBytes = i;
				for ( i=0; i<iBytes; i++ )
					p[i] = ' ';
			}

			// only move forward by the amount of succesfully processed bytes!
			p += i;
		}
	}

	if ( sph_XML_Parse ( m_pParser, (const char*) m_pBuffer, iBufferLen, bLast )!=XML_STATUS_OK )
	{
		DocID_t tFailedID = 0;
		if ( m_dParsedDocuments.GetLength() )
			tFailedID = m_dParsedDocuments.Last()->m_tDocID;

		// a callback-recorded error (m_sError) takes priority over expat's own message
		if ( !m_sError.IsEmpty () )
			sError = m_sError;
		else
		{
			sError.SetSprintf ( "source '%s': XML parse error: %s (line=%d, pos=%d, docid=" INT64_FMT ")",
				m_tSchema.GetName(), sph_XML_ErrorString ( sph_XML_GetErrorCode ( m_pParser ) ),
				(int)sph_XML_GetCurrentLineNumber ( m_pParser ), (int)sph_XML_GetCurrentColumnNumber ( m_pParser ),
				tFailedID );
		}

		m_tDocInfo.m_tRowID = 0;
		return false;
	}

	// expat may return OK even when a callback flagged an error; check that too
	if ( !m_sError.IsEmpty () )
	{
		sError = m_sError;
		m_tDocInfo.m_tRowID = 0;
		return false;
	}

	return true;
}
/// hand out the next parsed document: keep feeding the parser until at least one
/// document is queued, then convert its attribute strings into typed docinfo values
/// and expose its field pointers; returns nullptr on EOF (bEOF=true) or error
BYTE ** CSphSource_XMLPipe2::NextDocument ( bool & bEOF, CSphString & sError )
{
	assert ( m_pBuffer && m_pFieldBuffer );
	bEOF = true;

	// drop the document we handed out on the previous call
	if ( m_bRemoveParsed )
	{
		SafeDelete ( m_dParsedDocuments[0] );
		m_dParsedDocuments.RemoveFast ( 0 );
		m_bRemoveParsed = false;
	}

	// ok by default
	m_tDocInfo.m_tRowID = INVALID_ROWID;

	int iReadResult = 0;

	while ( m_dParsedDocuments.GetLength()==0 )
	{
		// saved bytes to the front!
		// (tail of a multi-byte UTF-8 sequence held back by ParseNextChunk)
		if ( m_iReparseLen )
			memmove ( m_pBuffer, m_pBuffer+m_iReparseStart, m_iReparseLen );

		// read more data
		iReadResult = (int) fread ( m_pBuffer+m_iReparseLen, 1, m_iBufferSize-m_iReparseLen, m_pPipe );
		if ( iReadResult==0 )
			break;

		// and parse it
		if ( !ParseNextChunk ( iReadResult+m_iReparseLen, sError ) )
			return nullptr;
	}

	CSphString sWarn;
	while ( m_dParsedDocuments.GetLength()!=0 )
	{
		Document_t * pDocument = m_dParsedDocuments[0];
		int nAttrs = m_tSchema.GetAttrsCount ();

		int iFirstFieldLenAttr = m_tSchema.GetAttrId_FirstFieldLen();
		int iLastFieldLenAttr = m_tSchema.GetAttrId_LastFieldLen();

		m_dMvas.Resize ( m_tSchema.GetAttrsCount() );
		for ( auto & i : m_dMvas )
			i.Resize(0);

		// attributes
		// convert each parsed string value into its typed slot
		for ( int i = 0; i < nAttrs; i++ )
		{
			const CSphColumnInfo & tAttr = m_tSchema.GetAttr(i);
			if ( sphIsInternalAttr(tAttr) )
				continue;

			// reset, and the value will be filled by IterateHits()
			if ( i>=iFirstFieldLenAttr && i<=iLastFieldLenAttr )
			{
				assert ( tAttr.m_eAttrType==SPH_ATTR_TOKENCOUNT );
				m_tDocInfo.SetAttr ( tAttr.m_tLocator, 0 );
				continue;
			}

			// empty value falls back to the per-attr default from the embedded schema, if any
			const CSphString & sAttrValue = pDocument->m_dAttrs[i].IsEmpty () && m_dDefaultAttrs.GetLength()
				? m_dDefaultAttrs[i]
				: pDocument->m_dAttrs[i];

			switch ( tAttr.m_eAttrType )
			{
				case SPH_ATTR_STRING:
				case SPH_ATTR_JSON:
					m_dStrAttrs[i] = sAttrValue.cstr();
					if ( !m_dStrAttrs[i].cstr() )
						m_dStrAttrs[i] = "";
					break;

				case SPH_ATTR_FLOAT:
					{
						float fValue = sphToFloat ( sAttrValue.cstr() );
						m_dAttrs[i] = sphF2DW(fValue);

						if ( !tAttr.IsColumnar() )
							m_tDocInfo.SetAttrFloat ( tAttr.m_tLocator, fValue );
					}
					break;

				case SPH_ATTR_BIGINT:
					// attr 0 is the document id and gets docid-specific parsing
					if ( i )
						m_dAttrs[i] = sphToInt64 ( sAttrValue.cstr(), &sWarn );
					else // negative number checks not necessary here as they were done before
						m_dAttrs[i] = (int64_t)StrToDocID ( sAttrValue.cstr(), sWarn );

					if ( !sWarn.IsEmpty() )
						sphWarn ( "%s", sWarn.cstr() );

					if ( !tAttr.IsColumnar() )
						m_tDocInfo.SetAttr ( tAttr.m_tLocator, m_dAttrs[i] );
					break;

				case SPH_ATTR_UINT32SET:
				case SPH_ATTR_INT64SET:
					if ( tAttr.m_eSrc==SPH_ATTRSRC_FIELD )
						ParseFieldMVA ( i, sAttrValue.cstr() );
					break;

				case SPH_ATTR_BOOL:
					m_dAttrs[i] = sphToDword ( sAttrValue.cstr() ) ? 1 : 0;
					if ( !tAttr.IsColumnar() )
						m_tDocInfo.SetAttr ( tAttr.m_tLocator, m_dAttrs[i] );
					break;

				default:
					m_dAttrs[i] = sphToDword ( sAttrValue.cstr() );
					if ( !tAttr.IsColumnar() )
						m_tDocInfo.SetAttr ( tAttr.m_tLocator, m_dAttrs[i] );
					break;
			}
		}

		// mark the front document for removal on the NEXT call; its field
		// buffers must stay alive while the caller consumes the pointers below
		m_bRemoveParsed = true;

		int nFields = m_tSchema.GetFieldsCount();
	//	if ( !nFields )
	//		return nullptr;

		m_dFieldPtrs.Resize ( nFields );
		m_dFieldLengths.Resize ( nFields );
		for ( int i = 0; i < nFields; ++i )
		{
			m_dFieldPtrs[i] = pDocument->m_dFields[i].Begin();
			m_dFieldLengths[i] = pDocument->m_dFields[i].GetLength();

			// skip trailing zero
			if ( m_dFieldLengths[i] && !m_dFieldPtrs[i][m_dFieldLengths[i]-1] )
				m_dFieldLengths[i]--;
		}

		bEOF = false;
		return m_dFieldPtrs.Begin();
	}

	return nullptr;
}
/// rewind the kill-list cursor; the entries themselves were collected during parsing
bool CSphSource_XMLPipe2::IterateKillListStart ( CSphString & )
{
	m_iKillListIterator = 0;
	return true;
}
/// yield the next kill-list docid; false once the list is exhausted
bool CSphSource_XMLPipe2::IterateKillListNext ( DocID_t & tDocID )
{
	const bool bHaveMore = ( m_iKillListIterator<m_dKillList.GetLength () );
	if ( bHaveMore )
		tDocID = m_dKillList [ m_iKillListIterator++ ];
	return bHaveMore;
}
/// the sphinx:* elements we recognize in the incoming XML stream
enum EXMLElem
{
	ELEM_DOCSET,
	ELEM_SCHEMA,
	ELEM_FIELD,
	ELEM_ATTR,
	ELEM_DOCUMENT,
	ELEM_KLIST,
	ELEM_NONE
};

/// map an element name to its EXMLElem id; anything unrecognized yields ELEM_NONE
static EXMLElem LookupElement ( const char * szName )
{
	// every known element starts with 's' ("sphinx:..."); bail out early otherwise
	if ( szName[0]!='s' )
		return ELEM_NONE;

	if ( !strcmp ( szName, "sphinx:docset" ) )		return ELEM_DOCSET;
	if ( !strcmp ( szName, "sphinx:schema" ) )		return ELEM_SCHEMA;
	if ( !strcmp ( szName, "sphinx:field" ) )		return ELEM_FIELD;
	if ( !strcmp ( szName, "sphinx:attr" ) )		return ELEM_ATTR;
	if ( !strcmp ( szName, "sphinx:document" ) )	return ELEM_DOCUMENT;
	if ( !strcmp ( szName, "sphinx:killlist" ) )	return ELEM_KLIST;

	return ELEM_NONE;
}
/// expat start-element callback: drives the docset/schema/document/killlist
/// state machine and routes field/attribute tags to the current document
void CSphSource_XMLPipe2::StartElement ( const char * szName, const char ** pAttrs )
{
	EXMLElem ePos = LookupElement ( szName );

	switch ( ePos )
	{
	case ELEM_DOCSET:
		m_bInDocset = true;
		m_bFirstTagAfterDocset = true;
		return;

	case ELEM_SCHEMA:
	{
		// embedded schema must be the very first child of the docset
		if ( !m_bInDocset || !m_bFirstTagAfterDocset )
		{
			Error ( "<sphinx:schema> is allowed immediately after <sphinx:docset> only" );
			return;
		}

		// GetAttrsCount()>1 because the auto 'id' attribute is always present
		if ( m_tSchema.GetFieldsCount() > 0 || m_tSchema.GetAttrsCount () > 1 )
			sphWarn ( "%s", DecorateMessage ( "both embedded and configured schemas found; using embedded" ) );

		// embedded schema wins: drop whatever was configured
		m_tSchema.Reset();
		CSphMatch tDocInfo;
		Swap ( m_tDocInfo, tDocInfo );
		m_dDefaultAttrs.Reset();

		m_bFirstTagAfterDocset = false;
		m_bInSchema = true;
	}
	return;

	case ELEM_FIELD:
	{
		if ( !m_bInDocset || !m_bInSchema )
		{
			Error ( "<sphinx:field> is allowed inside <sphinx:schema> only" );
			return;
		}

		const char ** dAttrs = pAttrs;
		CSphColumnInfo Info;
		CSphString sDefault;
		bool bIsAttr = false;
		bool bWordDict = ( m_pDict && m_pDict->GetSettings().m_bWordDict );

		// expat hands attributes as a flat name/value pair array
		while ( dAttrs[0] && dAttrs[1] && dAttrs[0][0] && dAttrs[1][0] )
		{
			if ( !strcmp ( *dAttrs, "name" ) )
			{
				Info.m_sName = dAttrs[1];
				if ( m_tSchema.GetField ( Info.m_sName.cstr() ) )
				{
					Error ( "field '%s' is added twice", Info.m_sName.cstr() );
					return;
				}
				AddFieldToSchema ( Info.m_sName.cstr(), bWordDict, m_tSchema );
			} else if ( !strcmp ( *dAttrs, "attr" ) )
			{
				// attr="string"/"json" makes this field double as a stored attribute
				bIsAttr = true;
				if ( !strcmp ( dAttrs[1], "string" ) )
					Info.m_eAttrType = SPH_ATTR_STRING;
				else if ( !strcmp ( dAttrs[1], "json" ) )
					Info.m_eAttrType = SPH_ATTR_JSON;

			} else if ( !strcmp ( *dAttrs, "default" ) )
				sDefault = dAttrs[1];

			dAttrs += 2;
		}

		if ( bIsAttr )
		{
			if ( Info.m_sName.IsEmpty() || CSphSchema::IsReserved ( Info.m_sName.cstr() ) )
			{
				Error ( "%s is not a valid attribute name", Info.m_sName.cstr() );
				return;
			}

			if ( m_tSchema.GetAttr ( Info.m_sName.cstr() ) )
			{
				Error ( "attribute '%s' is added twice", Info.m_sName.cstr() );
				return;
			}

			Info.m_iIndex = m_tSchema.GetAttrsCount ();
			m_tSchema.AddAttr ( Info, true ); // all attributes are dynamic at indexing time
			m_dDefaultAttrs.Add ( sDefault );
		}
	}
	return;

	case ELEM_ATTR:
	{
		if ( !m_bInDocset || !m_bInSchema )
		{
			Error ( "<sphinx:attr> is allowed inside <sphinx:schema> only" );
			return;
		}

		bool bError = false;
		CSphString sDefault;

		CSphColumnInfo Info;
		Info.m_eAttrType = SPH_ATTR_INTEGER; // default type when 'type' is omitted

		const char ** dAttrs = pAttrs;

		while ( dAttrs[0] && dAttrs[1] && dAttrs[0][0] && dAttrs[1][0] && !bError )
		{
			if ( !strcmp ( *dAttrs, "name" ) )
				Info.m_sName = dAttrs[1];
			else if ( !strcmp ( *dAttrs, "bits" ) )
				Info.m_tLocator.m_iBitCount = strtol ( dAttrs[1], NULL, 10 );
			else if ( !strcmp ( *dAttrs, "default" ) )
				sDefault = dAttrs[1];
			else if ( !strcmp ( *dAttrs, "type" ) )
			{
				const char * szType = dAttrs[1];
				if ( !strcmp ( szType, "int" ) )				Info.m_eAttrType = SPH_ATTR_INTEGER;
				else if ( !strcmp ( szType, "timestamp" ) )		Info.m_eAttrType = SPH_ATTR_TIMESTAMP;
				else if ( !strcmp ( szType, "bool" ) )			Info.m_eAttrType = SPH_ATTR_BOOL;
				else if ( !strcmp ( szType, "float" ) )			Info.m_eAttrType = SPH_ATTR_FLOAT;
				else if ( !strcmp ( szType, "bigint" ) )		Info.m_eAttrType = SPH_ATTR_BIGINT;
				else if ( !strcmp ( szType, "string" ) )		Info.m_eAttrType = SPH_ATTR_STRING;
				else if ( !strcmp ( szType, "json" ) )			Info.m_eAttrType = SPH_ATTR_JSON;
				else if ( !strcmp ( szType, "multi" ) )
				{
					// MVA values come from the element body, not from a query
					Info.m_eAttrType = SPH_ATTR_UINT32SET;
					Info.m_eSrc = SPH_ATTRSRC_FIELD;
				} else if ( !strcmp ( szType, "multi_64" ) )
				{
					Info.m_eAttrType = SPH_ATTR_INT64SET;
					Info.m_eSrc = SPH_ATTRSRC_FIELD;
				} else
				{
					Error ( "unknown column type '%s'", szType );
					bError = true;
				}
			}

			dAttrs += 2;
		}

		if ( !bError )
		{
			if ( Info.m_sName.IsEmpty() || CSphSchema::IsReserved ( Info.m_sName.cstr() ) )
			{
				Error ( "%s is not a valid attribute name", Info.m_sName.cstr() );
				return;
			}

			if ( m_tSchema.GetAttr ( Info.m_sName.cstr() ) )
			{
				Error ( "attribute '%s' is added twice", Info.m_sName.cstr() );
				return;
			}

			Info.m_iIndex = m_tSchema.GetAttrsCount ();
			m_tSchema.AddAttr ( Info, true ); // all attributes are dynamic at indexing time
			m_dDefaultAttrs.Add ( sDefault );
		}
	}
	return;

	case ELEM_DOCUMENT:
	{
		if ( !m_bInDocset || m_bInSchema )
			return DocumentError ( "<sphinx:schema>" );

		if ( m_bInKillList )
			return DocumentError ( "<sphinx:killlist>" );

		if ( m_bInDocument )
			return DocumentError ( "<sphinx:document>" );

		if ( m_tSchema.GetFieldsCount()==0 && m_tSchema.GetAttrsCount()==0 )
		{
			Error ( "no schema configured, and no embedded schema found" );
			return;
		}

		m_bInDocument = true;

		assert ( !m_pCurDocument );
		m_pCurDocument = new Document_t;
		// INT64_MAX = "no id seen yet"; a real id must come from the tag or a child node
		m_pCurDocument->m_tDocID = INT64_MAX;

		m_pCurDocument->m_dFields.Resize ( m_tSchema.GetFieldsCount() );
		// for safety
		ARRAY_FOREACH ( i, m_pCurDocument->m_dFields )
			m_pCurDocument->m_dFields[i].Add ( '\0' );
		m_pCurDocument->m_dAttrs.Resize ( m_tSchema.GetAttrsCount () );

		// docid may be passed as the 'id' tag attribute
		if ( pAttrs[0] && pAttrs[1] && pAttrs[0][0] && pAttrs[1][0] )
			if ( !strcmp ( pAttrs[0], "id" ) )
			{
				uint64_t uDocID = StrToDocID ( pAttrs[1], m_sDocIDError );
				m_pCurDocument->m_tDocID = (DocID_t)uDocID;
				m_pCurDocument->m_dAttrs[0] = pAttrs[1];
			}
	}
	return;

	case ELEM_KLIST:
	{
		if ( !m_bInDocset || m_bInDocument || m_bInSchema )
		{
			Error ( "<sphinx:killlist> is not allowed inside <sphinx:schema> or <sphinx:document>" );
			return;
		}

		m_bInKillList = true;
	}
	return;

	case ELEM_NONE: break; // avoid warning
	}

	// not a sphinx:* element: must be an id inside the kill-list,
	// or a field/attribute node inside a document
	if ( m_bInKillList )
	{
		if ( m_bInId )
		{
			// nested tags inside <id> are skipped via depth counting
			m_iElementDepth++;
			return;
		}

		if ( !!strcmp ( szName, "id" ) )
		{
			Error ( "only 'id' is allowed inside <sphinx:killlist>" );
			return;
		}

		m_bInId = true;

	} else if ( m_bInDocument )
	{
		if ( m_iCurField!=-1 || m_iCurAttr!=-1 )
		{
			// already collecting a field/attr value; ignore nested markup
			m_iElementDepth++;
			return;
		}

		m_iCurField = m_tSchema.GetFieldIndex ( szName );
		m_iCurAttr = m_tSchema.GetAttrIndex ( szName );

		if ( m_iCurAttr!=-1 || m_iCurField!=-1 )
			return;

		// unknown tag: warn once per tag name, then silently drop its content
		m_bInIgnoredTag = true;

		bool bInvalidFound = false;
		for ( int i = 0; i < m_dInvalid.GetLength () && !bInvalidFound; i++ )
			bInvalidFound = m_dInvalid[i]==szName;

		if ( !bInvalidFound )
		{
			sphWarn ( "%s", DecorateMessage ( "unknown field/attribute '%s'; ignored", szName ) );
			m_dInvalid.Add ( szName );
		}
	}
}
/// expat end-element callback: finalizes schema/document/killlist sections
/// and commits the buffered field/attribute value into the current document
void CSphSource_XMLPipe2::EndElement ( const char * szName )
{
	m_bInIgnoredTag = false;

	EXMLElem ePos = LookupElement ( szName );

	switch ( ePos )
	{
	case ELEM_DOCSET:
		m_bInDocset = false;
		return;

	case ELEM_SCHEMA:
	{
		m_bInSchema = false;
		m_tSchema.SetupFlags ( *this, false, nullptr );

		// id attribute is auto added - can not redefine it in schema
		if ( m_tSchema.GetAttr ( sphGetDocidName() ) )
		{
			Error ( "can not define auto-defined '%s' attribute", sphGetDocidName() );
			return;
		}

		AddAutoAttrs ( m_sError, &m_dDefaultAttrs );
		AllocDocinfo();
	}
	return;

	case ELEM_DOCUMENT:
		m_bInDocument = false;
		// a malformed docid invalidates the whole document
		if ( !m_sDocIDError.IsEmpty() )
		{
			sphWarn ( "%s", DecorateMessage ( "%s", m_sDocIDError.cstr() ) );
			m_sDocIDError = "";
			delete m_pCurDocument;
		}
		else
		{
			if ( m_pCurDocument )
				m_dParsedDocuments.Add ( m_pCurDocument );
		}
		m_pCurDocument = nullptr;
		return;

	case ELEM_KLIST:
		m_bInKillList = false;
		return;

	case ELEM_FIELD: // avoid warnings
	case ELEM_ATTR:
	case ELEM_NONE: break;
	}

	if ( m_bInKillList )
	{
		if ( m_iElementDepth!=0 )
		{
			// still unwinding ignored nested markup
			m_iElementDepth--;
			return;
		}

		if ( m_bInId )
		{
			// NUL-terminate the buffered id text, clamped to the buffer size
			m_pFieldBuffer [ Min ( m_iFieldBufferLen, m_iFieldBufferMax ) ] = '\0';
			m_dKillList.Add ( sphToInt64 ( (const char *)m_pFieldBuffer ) );
			m_iFieldBufferLen = 0;
			m_bInId = false;
		}

	} else if ( m_bInDocument && ( m_iCurAttr!=-1 || m_iCurField!=-1 ) )
	{
		if ( m_iElementDepth!=0 )
		{
			--m_iElementDepth;
			return;
		}

		if ( m_iCurField!=-1 )
		{
			assert ( m_pCurDocument );
			CSphVector<BYTE> & dBuf = m_pCurDocument->m_dFields [ m_iCurField ];

			// replace the previous trailing NUL with a separator and append the new chunk
			dBuf.Last() = ' ';
			dBuf.Reserve ( dBuf.GetLength() + m_iFieldBufferLen + 6 ); // 6 is a safety gap
			dBuf.Append( m_pFieldBuffer, m_iFieldBufferLen );
			dBuf.Add ( '\0' );
		}
		if ( m_iCurAttr!=-1 )
		{
			assert ( m_pCurDocument );
			if ( !m_pCurDocument->m_dAttrs [ m_iCurAttr ].IsEmpty () )
			{
				const CSphColumnInfo & tCol = m_tSchema.GetAttr ( m_iCurAttr );

				// can not redefine id attribute
				if ( tCol.m_sName=="id" )
				{
					// store the duplicate id anyway so the error message can reference it
					m_pCurDocument->m_dAttrs [ m_iCurAttr ].SetBinary ( (char*)m_pFieldBuffer, m_iFieldBufferLen );
					if ( m_dParsedDocuments.GetLength() )
						m_dParsedDocuments.Last()->m_tDocID = sphToUInt64 ( m_pCurDocument->m_dAttrs [ m_iCurAttr ].cstr() );

					Error ( "duplicate attribute node <%s>", tCol.m_sName.cstr() );
					return;
				}
				else
				{
					// non-id duplicates are tolerated: first value wins
					sphWarn ( "duplicate attribute node <%s> - using first value", tCol.m_sName.cstr() );
				}
			} else
			{
				m_pCurDocument->m_dAttrs [ m_iCurAttr ].SetBinary ( (char*)m_pFieldBuffer, m_iFieldBufferLen );
			}
		}

		m_iFieldBufferLen = 0;
		m_iCurAttr = -1;
		m_iCurField = -1;
	}
}
void CSphSource_XMLPipe2::UnexpectedCharaters ( const char * pCharacters, int iLen, const char * szComment )
{
const int MAX_WARNING_LENGTH = 64;
bool bSpaces = true;
for ( int i = 0; i < iLen && bSpaces; i++ )
if ( !sphIsSpace ( pCharacters[i] ) )
bSpaces = false;
if ( !bSpaces )
{
CSphString sWarning;
sWarning.SetBinary ( pCharacters, Min ( iLen, MAX_WARNING_LENGTH ) );
sphWarn ( "source '%s': unexpected string '%s' (line=%d, pos=%d) %s",
m_tSchema.GetName(), sWarning.cstr (),
(int)sph_XML_GetCurrentLineNumber ( m_pParser ), (int)sph_XML_GetCurrentColumnNumber ( m_pParser ), szComment );
}
}
/// expat character-data callback: accumulates text of the current
/// field/attribute/kill-list-id node into the fixed-size field buffer
void CSphSource_XMLPipe2::Characters ( const char * pCharacters, int iLen )
{
	if ( m_bInIgnoredTag )
		return;

	// text outside any meaningful context only produces warnings
	if ( !m_bInDocset )
	{
		UnexpectedCharaters ( pCharacters, iLen, "outside of <sphinx:docset>" );
		return;
	}

	if ( !m_bInSchema && !m_bInDocument && !m_bInKillList )
	{
		UnexpectedCharaters ( pCharacters, iLen, "outside of <sphinx:schema> and <sphinx:document>" );
		return;
	}

	if ( m_iCurAttr==-1 && m_iCurField==-1 && !m_bInKillList )
	{
		UnexpectedCharaters ( pCharacters, iLen, m_bInDocument ? "inside <sphinx:document>" : ( m_bInSchema ? "inside <sphinx:schema>" : "" ) );
		return;
	}

	if ( iLen + m_iFieldBufferLen <= m_iFieldBufferMax )
	{
		// fits: append to the buffer; expat may deliver a value in many chunks
		memcpy ( m_pFieldBuffer + m_iFieldBufferLen, pCharacters, iLen );
		m_iFieldBufferLen += iLen;

	} else
	{
		// overflow: drop the excess and warn once per field/attribute name
		const char * szName = nullptr;
		if ( m_iCurField!=-1 )
			szName = m_tSchema.GetFieldName ( m_iCurField );
		else if ( m_iCurAttr!=-1 )
			szName = m_tSchema.GetAttr(m_iCurAttr).m_sName.cstr();

		// NOTE(review): in the kill-list path both m_iCurField and m_iCurAttr are -1,
		// so szName stays null and m_pCurDocument may be null below — looks like an
		// oversized <id> would hit this assert / deref; confirm against callers
		assert ( szName );

		bool bWarned = false;
		for ( int i = 0; i < m_dWarned.GetLength () && !bWarned; i++ )
			bWarned = m_dWarned[i]==szName;

		if ( !bWarned )
		{
			sphWarn ( "source '%s': field/attribute '%s' length exceeds max length (line=%d, pos=%d, docid=" INT64_FMT ")",
				m_tSchema.GetName(), szName,
				(int)sph_XML_GetCurrentLineNumber ( m_pParser ), (int)sph_XML_GetCurrentColumnNumber ( m_pParser ),
				m_pCurDocument->m_tDocID );

			m_dWarned.Add ( szName );
		}
	}
}
/// factory: build an xmlpipe2 source over an already-opened pipe;
/// returns nullptr (with sError set) if setup fails
CSphSource * sphCreateSourceXmlpipe2 ( const CSphConfigSection * pSource, FILE * pPipe, const char * szSourceName, int iMaxFieldLen, CSphString & sError )
{
	auto * pXmlSource = new CSphSource_XMLPipe2 ( szSourceName );
	bool bFixupUTF8 = pSource->GetInt ( "xmlpipe_fixup_utf8", 0 )!=0;

	if ( pXmlSource->SetupXML ( iMaxFieldLen, bFixupUTF8, pPipe, *pSource, sError ) )
		return pXmlSource;

	// setup failed: destroy the half-built source and report failure via nullptr
	SafeDelete ( pXmlSource );
	return nullptr;
}
| 33,917
|
C++
|
.cpp
| 1,028
| 29.839494
| 187
| 0.668116
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,977
|
dict_exact.cpp
|
manticoresoftware_manticoresearch/src/dict/dict_exact.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org
//
#include "dict_proxy.h"
#include "sphinxint.h"
//////////////////////////////////////////////////////////////////////////
/// dict wrapper for exact-word syntax
/// dict wrapper for exact-word ("=word") syntax: rewrites the '=' marker
/// into the non-stemmed magic prefix before delegating to the wrapped dict
class DictExact_c: public DictProxy_c
{
	// hide the 3-arg overload; only the single-pointer form is customized
	using DictProxy_c::GetWordID;

public:
	explicit DictExact_c ( DictRefPtr_c pDict )
		: DictProxy_c ( std::move (pDict) )
	{}

	SphWordID_t GetWordID ( BYTE* pWord ) final;
};
/// hash a keyword, honoring the exact-form ('=') prefix:
/// '='-prefixed and magic-prefixed words bypass morphology entirely
SphWordID_t DictExact_c::GetWordID ( BYTE* pWord )
{
	// clamp to the maximum keyword length the engine handles
	int iWordLen = Min ( (int)strlen ( (const char*)pWord ), 16 + 3 * SPH_MAX_WORD_LEN - 1 );
	if ( iWordLen==0 )
		return 0;

	// '=' requests the exact form: swap it for the non-stemmed magic marker
	if ( pWord[0] == '=' )
		pWord[0] = MAGIC_WORD_HEAD_NONSTEMMED;

	// any control-byte prefix means "do not stem this word"
	return ( pWord[0] < ' ' )
		? m_pDict->GetWordIDNonStemmed ( pWord )
		: m_pDict->GetWordID ( pWord );
}
/// wrap an existing dictionary with exact-form ('=word') support, in place
void SetupExactDict ( DictRefPtr_c& pDict )
{
	// the wrapper takes a reference to the old dict before the assignment replaces it
	pDict = new DictExact_c ( pDict );
}
| 1,305
|
C++
|
.cpp
| 40
| 30.85
| 80
| 0.666401
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,978
|
word_forms.cpp
|
manticoresoftware_manticoresearch/src/dict/word_forms.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org
//
#include "word_forms.h"
#include "tokenizer/multiform_container.h"
/////////////////////////////////////////////////////////////////////////////
/// free the multiform container; single-token forms own no heap objects
CSphWordforms::~CSphWordforms()
{
	if ( !m_pMultiWordforms )
		return;

	// each hash entry owns a multiform list, which in turn owns its forms
	for ( auto & tEntry : m_pMultiWordforms->m_Hash )
	{
		auto * pMulti = tEntry.second;
		for ( auto * pWordform : pMulti->m_pForms )
			SafeDelete ( pWordform );
		SafeDelete ( tEntry.second );
	}
}
bool CSphWordforms::IsEqual ( const CSphVector<CSphSavedFile>& dFiles )
{
if ( m_dFiles.GetLength() != dFiles.GetLength() )
return false;
// don't check file time AND check filename w/o path
// that way same file in different dirs will be only loaded once
ARRAY_FOREACH ( i, m_dFiles )
{
const CSphSavedFile& tF1 = m_dFiles[i];
const CSphSavedFile& tF2 = dFiles[i];
CSphString sFile1 = tF1.m_sFilename;
CSphString sFile2 = tF2.m_sFilename;
StripPath ( sFile1 );
StripPath ( sFile2 );
if ( sFile1 != sFile2 || tF1.m_uCRC32 != tF2.m_uCRC32 || tF1.m_uSize != tF2.m_uSize )
return false;
}
return true;
}
/// replace pWord with its normal form, if one is mapped for the current
/// morphology stage; with bOnlyCheck just report whether a mapping exists
bool CSphWordforms::ToNormalForm ( BYTE* pWord, bool bBefore, bool bOnlyCheck ) const
{
	const int * pIndex = m_hHash ( (char*)pWord );
	if ( !pIndex )
		return false;

	int iForm = *pIndex;
	if ( iForm < 0 || iForm >= m_dNormalForms.GetLength() )
		return false;

	const auto & tForm = m_dNormalForms[iForm];

	// pre-morphology lookups must not use post-morphology forms, and vice versa
	if ( bBefore == tForm.m_bAfterMorphology )
		return false;

	if ( tForm.m_sWord.IsEmpty() )
		return false;

	if ( bOnlyCheck )
		return true;

	// caller guarantees pWord has room for any stored normal form
	strcpy ( (char*)pWord, tForm.m_sWord.cstr() ); // NOLINT
	return true;
}
| 2,003
|
C++
|
.cpp
| 60
| 31.2
| 87
| 0.685848
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,979
|
dict_crc.cpp
|
manticoresoftware_manticoresearch/src/dict/dict_crc.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org
//
#include "dict_crc.h"
//////////////////////////////////////////////////////////////////////////
/// begin writing a dictionary file (unsorted entry flow);
/// delegates to the sorted variant since CRC dicts share the same layout
void DiskDictTraits_c::DictBegin ( CSphAutofile&, CSphAutofile& tDict, int iLimit )
{
	DiskDictTraits_c::SortedDictBegin ( tDict, iLimit, 0 );
}
/// (re)open the dictionary writer over the given file and emit the header byte
void DiskDictTraits_c::SortedDictBegin ( CSphAutofile& tDict, int, int )
{
	m_wrDict.CloseFile();
	m_wrDict.SetFile ( tDict, nullptr, m_sWriterError );
	// dummy leading byte so that no real entry ever starts at offset 0
	m_wrDict.PutByte ( 1 );
}
/// finish the dictionary: append the checkpoint table, record its location
/// in the header, and close the writer; returns false on write errors
bool DiskDictTraits_c::DictEnd ( DictHeader_t* pHeader, int, CSphString& sError )
{
	// the checkpoint table goes right after the entries, at the current position
	pHeader->m_iDictCheckpointsOffset = m_wrDict.GetPos();
	pHeader->m_iDictCheckpoints = m_dCheckpoints.GetLength();

	for ( const auto & tCP : m_dCheckpoints )
	{
		assert ( tCP.m_iWordlistOffset );
		m_wrDict.PutOffset ( tCP.m_uWordID );
		m_wrDict.PutOffset ( tCP.m_iWordlistOffset );
	}

	m_wrDict.CloseFile();

	bool bFailed = m_wrDict.IsError();
	if ( bFailed )
		sError = m_sWriterError;
	return !bFailed;
}
/// write one dictionary entry, delta-coded against the previous one;
/// emits a checkpoint and restarts delta coding every SPH_WORDLIST_CHECKPOINT entries
void DiskDictTraits_c::DictEntry ( const DictEntry_t& tEntry )
{
	assert ( m_iSkiplistBlockSize > 0 );

	// insert wordlist checkpoint
	if ( ( m_iEntries % SPH_WORDLIST_CHECKPOINT ) == 0 )
	{
		if ( m_iEntries ) // but not the 1st entry
		{
			assert ( tEntry.m_iDoclistOffset > m_iLastDoclistPos );
			m_wrDict.ZipInt ( 0 ); // indicate checkpoint
			m_wrDict.ZipOffset ( tEntry.m_iDoclistOffset - m_iLastDoclistPos ); // store last length
		}

		// restart delta coding, once per SPH_WORDLIST_CHECKPOINT entries
		m_iLastWordID = 0;
		m_iLastDoclistPos = 0;

		// begin new wordlist entry
		assert ( m_wrDict.GetPos() <= UINT_MAX );

		CSphWordlistCheckpoint& tCheckpoint = m_dCheckpoints.Add();
		tCheckpoint.m_uWordID = tEntry.m_uWordID;
		tCheckpoint.m_iWordlistOffset = m_wrDict.GetPos();
	}

	assert ( tEntry.m_iDoclistOffset > m_iLastDoclistPos );
	// deltas are guaranteed positive because entries arrive sorted by wordid
	m_wrDict.ZipOffset ( tEntry.m_uWordID - m_iLastWordID ); // FIXME! slow with 32bit wordids
	m_wrDict.ZipOffset ( tEntry.m_iDoclistOffset - m_iLastDoclistPos );

	m_iLastWordID = tEntry.m_uWordID;
	m_iLastDoclistPos = tEntry.m_iDoclistOffset;

	assert ( tEntry.m_iDocs );
	assert ( tEntry.m_iHits );
	m_wrDict.ZipInt ( tEntry.m_iDocs );
	m_wrDict.ZipInt ( tEntry.m_iHits );

	// write skiplist location info, if any
	if ( tEntry.m_iDocs > m_iSkiplistBlockSize )
		m_wrDict.ZipOffset ( tEntry.m_iSkiplistOffset );

	++m_iEntries;
}
/// terminate the entry stream with a final checkpoint marker,
/// storing the length of the very last doclist
void DiskDictTraits_c::DictEndEntries ( SphOffset_t iDoclistOffset )
{
	assert ( iDoclistOffset >= m_iLastDoclistPos );
	m_wrDict.ZipInt ( 0 ); // indicate checkpoint
	m_wrDict.ZipOffset ( iDoclistOffset - m_iLastDoclistPos ); // store last doclist length
}
//////////////////////////////////////////////////////////////////////////
/// factory: build a CRC (FNV64) dictionary configured per tSettings;
/// may return an empty ref if morphology setup fails inside SetupDictionary
DictRefPtr_c sphCreateDictionaryCRC ( const CSphDictSettings& tSettings, const CSphEmbeddedFiles* pFiles, const TokenizerRefPtr_c& pTokenizer, const char* szIndex, bool bStripFile, int iSkiplistBlockSize, FilenameBuilder_i* pFilenameBuilder, CSphString& sError )
{
	DictRefPtr_c pDict { new CSphDictCRC<CRCALGO::FNV64> };
	SetupDictionary ( pDict, tSettings, pFiles, pTokenizer, szIndex, bStripFile, pFilenameBuilder, sError );

	// SetupDictionary drops the dict on bad morphology setup; nothing more to do then
	if ( !pDict )
		return pDict;

	pDict->SetSkiplistBlockSize ( iSkiplistBlockSize );
	return pDict;
}
| 3,765
|
C++
|
.cpp
| 91
| 39.274725
| 262
| 0.713895
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,980
|
dict_star.cpp
|
manticoresoftware_manticoresearch/src/dict/dict_star.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org
//
#include "dict_proxy.h"
#include "sphinxint.h"
/// dict wrapper for star-syntax support in prefix-indexes
/// dict wrapper for star-syntax support in prefix-indexes (legacy, pre-v.8):
/// mangles keywords with wildcard markers before hashing
class DictStar_c: public DictProxy_c
{
	// hide the 3-arg overload; only the single-pointer form is customized
	using DictProxy_c::GetWordID;

public:
	explicit DictStar_c ( DictRefPtr_c pDict )
		: DictProxy_c ( std::move (pDict) )
	{}

	SphWordID_t GetWordID ( BYTE* pWord ) final;
};
/// hash a keyword for a legacy prefix-index: stems the word, then either
/// chops a trailing wildcard or appends the tail marker for exact prefixes
SphWordID_t DictStar_c::GetWordID ( BYTE* pWord )
{
	char sBuf[16 + 3 * SPH_MAX_WORD_LEN];

	assert ( strlen ( (const char*)pWord ) < 16 + 3 * SPH_MAX_WORD_LEN );

	// optionally drop stopwords before stemming (per dict settings)
	if ( m_pDict->GetSettings().m_bStopwordsUnstemmed && m_pDict->IsStopWord ( pWord ) )
		return 0;

	m_pDict->ApplyStemmers ( pWord );

	auto iLen = (int)strlen ( (const char*)pWord );
	assert ( iLen < 16 + 3 * SPH_MAX_WORD_LEN - 1 );
	// stemmer might squeeze out the word
	if ( iLen && !pWord[0] )
		return 0;

	memcpy ( sBuf, pWord, iLen + 1 );

	if ( iLen )
	{
		if ( sphIsWild ( sBuf[iLen - 1] ) )
		{
			// trailing wildcard: strip it, hash the bare prefix
			iLen--;
			sBuf[iLen] = '\0';
		} else
		{
			// no wildcard: append the tail marker so prefixes do not collide
			sBuf[iLen] = MAGIC_WORD_TAIL;
			iLen++;
			sBuf[iLen] = '\0';
		}
	}

	return m_pDict->GetWordID ( (BYTE*)sBuf, iLen, !m_pDict->GetSettings().m_bStopwordsUnstemmed );
}
/// wrap an existing dictionary with legacy (pre-v.8) star syntax support, in place
void SetupStarDictOld ( DictRefPtr_c& pDict )
{
	// the wrapper takes a reference to the old dict before the assignment replaces it
	pDict = new DictStar_c ( pDict );
}
| 1,674
|
C++
|
.cpp
| 55
| 28.309091
| 96
| 0.685554
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,981
|
dict_star8.cpp
|
manticoresoftware_manticoresearch/src/dict/dict_star8.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org
//
#include "dict_proxy.h"
#include "sphinxint.h"
/// star dict for index v.8+
/// star dict for index v.8+: mangles keywords with head/tail magic markers,
/// handling both prefix-only and infix indexing modes
class DictStarV8_c: public DictProxy_c
{
	// hide the 3-arg overload; only the single-pointer form is customized
	using DictProxy_c::GetWordID;

public:
	DictStarV8_c ( DictRefPtr_c pDict, bool bInfixes )
		: DictProxy_c ( std::move (pDict) )
		, m_bInfixes ( bInfixes )
	{}

	SphWordID_t GetWordID ( BYTE* pWord ) final;

private:
	bool m_bInfixes; // true = infix (or mixed infix+prefix) index, false = prefix-only
};
/// hash a (possibly wildcarded) keyword for a v.8+ prefix/infix index:
/// strips leading/trailing stars and inserts MAGIC_WORD_HEAD/TAIL markers
/// so that exact, prefix and infix lookups land in disjoint hash spaces
SphWordID_t DictStarV8_c::GetWordID ( BYTE* pWord )
{
	char sBuf[16 + 3 * SPH_MAX_WORD_LEN];

	auto iLen = (int)strlen ( (const char*)pWord );
	iLen = Min ( iLen, 16 + 3 * SPH_MAX_WORD_LEN - 1 );

	if ( !iLen )
		return 0;

	bool bHeadStar = sphIsWild ( pWord[0] );
	bool bTailStar = sphIsWild ( pWord[iLen - 1] ) && ( iLen > 1 );
	bool bMagic = ( pWord[0] < ' ' ); // control byte = pre-mangled magic keyword

	// plain (non-wildcard, non-magic) words get the usual stopword/stemming treatment
	if ( !bHeadStar && !bTailStar && !bMagic )
	{
		if ( m_pDict->GetSettings().m_bStopwordsUnstemmed && IsStopWord ( pWord ) )
			return 0;

		m_pDict->ApplyStemmers ( pWord );

		// stemmer might squeeze out the word
		if ( !pWord[0] )
			return 0;

		if ( !m_pDict->GetSettings().m_bStopwordsUnstemmed && IsStopWord ( pWord ) )
			return 0;
	}

	// stemming may have changed the length
	iLen = (int)strlen ( (const char*)pWord );
	assert ( iLen < 16 + 3 * SPH_MAX_WORD_LEN - 2 );

	if ( !iLen || ( bHeadStar && iLen == 1 ) )
		return 0;

	if ( bMagic ) // pass throu MAGIC_* words
	{
		memcpy ( sBuf, pWord, iLen );
		sBuf[iLen] = '\0';

	} else if ( m_bInfixes )
	{
		////////////////////////////////////
		// infix or mixed infix+prefix mode
		////////////////////////////////////

		// handle head star
		if ( bHeadStar )
		{
			memcpy ( sBuf, pWord + 1, iLen-- ); // chops star, copies trailing zero, updates iLen
		} else
		{
			// no head star: anchor the word start with the head marker
			sBuf[0] = MAGIC_WORD_HEAD;
			memcpy ( sBuf + 1, pWord, ++iLen ); // copies everything incl trailing zero, updates iLen
		}

		// handle tail star
		if ( bTailStar )
		{
			sBuf[--iLen] = '\0'; // got star, just chop it away
		} else
		{
			sBuf[iLen] = MAGIC_WORD_TAIL; // no star, add tail marker
			sBuf[++iLen] = '\0';
		}

	} else
	{
		////////////////////
		// prefix-only mode
		////////////////////

		// always ignore head star in prefix mode
		if ( bHeadStar )
		{
			pWord++;
			iLen--;
		}

		// handle tail star
		if ( !bTailStar )
		{
			// exact word search request, always (ie. both in infix/prefix mode) mangles to "\1word\1" in v.8+
			sBuf[0] = MAGIC_WORD_HEAD;
			memcpy ( sBuf + 1, pWord, iLen );
			sBuf[iLen + 1] = MAGIC_WORD_TAIL;
			sBuf[iLen + 2] = '\0';
			iLen += 2;

		} else
		{
			// prefix search request, mangles to word itself (just chop away the star)
			memcpy ( sBuf, pWord, iLen );
			sBuf[--iLen] = '\0';
		}
	}

	// calc id for mangled word
	return m_pDict->GetWordID ( (BYTE*)sBuf, iLen, !bHeadStar && !bTailStar );
}
/// wrap an existing dictionary with v.8+ star syntax support, in place;
/// bInfixes selects infix vs prefix-only keyword mangling
void SetupStarDictV8( DictRefPtr_c& pDict, bool bInfixes )
{
	// the wrapper takes a reference to the old dict before the assignment replaces it
	pDict = new DictStarV8_c ( pDict, bInfixes );
}
| 3,267
|
C++
|
.cpp
| 112
| 26.446429
| 101
| 0.625959
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,982
|
bin.cpp
|
manticoresoftware_manticoresearch/src/dict/bin.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org
//
#include "bin.h"
#include "fileio.h"
/////////////////////////////////////////////////////////////////////////////
// CHUNK READER
/////////////////////////////////////////////////////////////////////////////
/// construct a bin (chunk reader) for the given hitless mode;
/// with a word dict, hits carry the keyword text instead of a numeric id
CSphBin::CSphBin ( ESphHitless eMode, bool bWordDict )
	: m_eMode ( eMode )
	, m_bWordDict ( bWordDict )
{
	// keyword-dict hits point into our keyword buffer; crc-dict hits carry no text
	m_tHit.m_szKeyword = bWordDict ? m_sKeyword.data() : nullptr;
	m_sKeyword[0] = '\0';

#ifndef NDEBUG
	// debug builds track the previous keyword to assert sorted input
	m_sLastKeyword[0] = '\0';
#endif
}
/// split the memory budget evenly across iBlocks bins, rounding each share
/// to a 4 KB boundary and clamping to MIN_SIZE; warns when the limit is low
int CSphBin::CalcBinSize ( int iMemoryLimit, int iBlocks, const char* sPhase )
{
	if ( iBlocks <= 0 )
		return CSphBin::MIN_SIZE;

	// per-bin share, rounded to the nearest 4 KB
	int iShare = ( ( iMemoryLimit / iBlocks + 2048 ) >> 12 ) << 12;

	if ( iShare < CSphBin::MIN_SIZE )
	{
		iShare = CSphBin::MIN_SIZE;
		sphWarn ( "%s: mem_limit=%d kb extremely low, increasing to %d kb",
			sPhase, iMemoryLimit / 1024, iShare * iBlocks / 1024 );
	}

	if ( iShare < CSphBin::WARN_SIZE )
		sphWarn ( "%s: increasing mem_limit may improve performance", sPhase );

	return iShare;
}
/// attach the bin to a file region; pSharedOffset is the file position
/// tracker shared between all bins reading the same fd
void CSphBin::Init ( int iFD, SphOffset_t* pSharedOffset, const int iBinSize )
{
	assert ( m_dBuffer.IsEmpty() );
	assert ( iBinSize >= MIN_SIZE );
	assert ( pSharedOffset );

	m_iFile = iFD;
	m_pFilePos = pSharedOffset;

	m_dBuffer.Reset ( iBinSize );
	m_pCurrent = m_dBuffer.begin();

	// reset the delta-decoding state for the hit stream
	m_tHit.m_tRowID = INVALID_ROWID;
	m_tHit.m_uWordID = 0;
	m_tHit.m_iWordPos = EMPTY_HIT;
	m_tHit.m_dFieldMask.UnsetAll();

	m_bError = false;
}
/// read one byte, refilling the buffer from the file when it runs dry;
/// returns a negative value on I/O error or unexpected EOF (and sets m_bError)
int CSphBin::ReadByte()
{
	BYTE r;

	if ( !m_iLeft )
	{
		// the fd is shared between bins: seek back to our region if another
		// bin moved the file position since our last read
		if ( *m_pFilePos != m_iFilePos )
		{
			if ( !SeekAndWarn ( m_iFile, m_iFilePos, "CSphBin::ReadBytes" ) )
			{
				// fixme! That is legacy wrong; BIN_READ_ERROR is legal positive value,
				// m.b. that is the place where c++ exception is suitable?
				m_bError = true;
				return BIN_READ_ERROR;
			}
			*m_pFilePos = m_iFilePos;
		}

		int n = m_iFileLeft > m_dBuffer.GetLength()
			? m_dBuffer.GetLength()
			: (int)m_iFileLeft;
		if ( n == 0 )
		{
			// our file region is exhausted; m_iLeft=1 lets the code below fall
			// through to the m_iDone branch instead of re-entering the refill
			m_iDone = 1;
			m_iLeft = 1;
		} else
		{
			assert ( !m_dBuffer.IsEmpty() );

			if ( sphReadThrottled ( m_iFile, m_dBuffer.begin(), n ) != (size_t)n )
			{
				m_bError = true;
				return -2;
			}

			// advance both our private and the shared position trackers
			m_iLeft = n;
			m_iFilePos += n;
			m_iFileLeft -= n;
			m_pCurrent = m_dBuffer.begin();
			*m_pFilePos += n;
		}
	}

	if ( m_iDone )
	{
		m_bError = true; // unexpected (!) eof
		return -1;
	}

	m_iLeft--;
	r = *( m_pCurrent );
	m_pCurrent++;
	return r;
}
/// read exactly iBytes into pDest, refilling the buffer as needed;
/// iBytes must not exceed the buffer size
ESphBinRead CSphBin::ReadBytes ( void* pDest, int iBytes )
{
	assert ( iBytes > 0 );
	assert ( iBytes <= m_dBuffer.GetLength() );

	if ( m_iDone )
		return BIN_READ_EOF;

	if ( m_iLeft < iBytes )
	{
		// the fd is shared between bins: restore our position if needed
		if ( *m_pFilePos != m_iFilePos )
		{
			if ( !SeekAndWarn ( m_iFile, m_iFilePos, "CSphBin::ReadBytes" ) )
			{
				m_bError = true;
				return BIN_READ_ERROR;
			}
			*m_pFilePos = m_iFilePos;
		}

		int n = Min ( m_iFileLeft, m_dBuffer.GetLength() - m_iLeft );
		if ( n == 0 )
		{
			m_iDone = 1;
			m_bError = true; // unexpected (!) eof
			return BIN_READ_EOF;
		}

		assert ( !m_dBuffer.IsEmpty() );

		// shift the unread tail to the buffer head, then top the buffer up
		memmove ( m_dBuffer.begin(), m_pCurrent, m_iLeft );

		if ( sphReadThrottled ( m_iFile, &m_dBuffer[m_iLeft], n ) != (size_t)n )
		{
			m_bError = true;
			return BIN_READ_ERROR;
		}

		m_iLeft += n;
		m_iFilePos += n;
		m_iFileLeft -= n;
		m_pCurrent = m_dBuffer.begin();
		*m_pFilePos += n;
	}

	assert ( m_iLeft >= iBytes );
	m_iLeft -= iBytes;

	memcpy ( pDest, m_pCurrent, iBytes );
	m_pCurrent += iBytes;

	return BIN_READ_OK;
}
// fixme! That is simple UnzipValueLE with extra check; refactor!
/// decode a little-endian base-128 varint; returns 0 on read error
/// (callers must inspect m_bError to tell an error from a genuine zero)
SphWordID_t CSphBin::ReadVLB()
{
	SphWordID_t uValue = 0;
	int iShift = 0;

	while ( true )
	{
		int iByte = ReadByte();
		if ( iByte < 0 )
			return 0;

		// low 7 bits are payload, high bit flags a continuation
		uValue += ( SphWordID_t ( iByte & 0x7f ) ) << iShift;
		iShift += 7;

		if ( !( iByte & 0x80 ) )
			break;
	}

	return uValue;
}
// fixme! That is simple UnzipValueBE with extra check; refactor!
/// decode a big-endian base-128 varint into a DWORD;
/// read errors are folded in as zero bytes (m_bError is set by ReadByte)
DWORD CSphBin::UnzipInt()
{
	DWORD uValue = 0;
	int iByte;

	do
	{
		iByte = ReadByte();
		if ( iByte < 0 )
			iByte = 0; // degrade the failed read to zero payload bits
		uValue = ( uValue << 7 ) | ( iByte & 0x7f );
	} while ( iByte & 0x80 );

	return uValue;
}
// fixme! That is simple UnzipValueBE with extra check; refactor!
/// decode a big-endian base-128 varint into a 64-bit offset;
/// read errors are folded in as zero bytes (m_bError is set by ReadByte)
SphOffset_t CSphBin::UnzipOffset()
{
	SphOffset_t uValue = 0;
	int iByte;

	do
	{
		iByte = ReadByte();
		if ( iByte < 0 )
			iByte = 0; // degrade the failed read to zero payload bits
		uValue = ( uValue << 7 ) | ( iByte & 0x7f );
	} while ( iByte & 0x80 );

	return uValue;
}
/// decode the next aggregate hit from the bin's word/doc/pos state machine;
/// a zero delta pops one state level, a zero wordid in pOut signals stream end
int CSphBin::ReadHit ( AggregateHit_t* pOut )
{
	// expected EOB
	if ( m_iDone )
	{
		pOut->m_uWordID = 0;
		return 1;
	}

	AggregateHit_t& tHit = m_tHit; // shortcut

	while ( true )
	{
		// SPH_MAX_WORD_LEN is now 42 only to keep ReadVLB() below
		// technically, we can just use different functions on different paths, if ever needed
		STATIC_ASSERT ( SPH_MAX_WORD_LEN * 3 <= 127, KEYWORD_TOO_LONG );

		SphWordID_t uDelta = ReadVLB();
		if ( uDelta )
		{
			switch ( m_eState )
			{
			case BIN_WORD:
				if ( m_bWordDict )
				{
					// keyword dict: the delta is actually the keyword length
#ifdef NDEBUG
					// FIXME?! move this under PARANOID or something?
					// or just introduce an assert() checked release build?
					if ( uDelta >= std::size ( m_sKeyword ) )
						sphDie ( "INTERNAL ERROR: corrupted keyword length (len=" UINT64_FMT ", deltapos=" UINT64_FMT ")",
							(uint64_t)uDelta,
							(uint64_t)( m_iFilePos - m_iLeft ) );
#else
					assert ( uDelta > 0 && uDelta < std::size ( m_sKeyword ) - 1 );
#endif
					ReadBytes ( m_sKeyword.data(), (int)uDelta );
					m_sKeyword[uDelta] = '\0';
					tHit.m_uWordID = sphCRC32 ( m_sKeyword.data() ); // must be in sync with dict!

#ifndef NDEBUG
					// keywords must arrive in strictly ascending (crc, text) order
					assert ( ( m_iLastWordID < tHit.m_uWordID )
						|| ( m_iLastWordID == tHit.m_uWordID && strcmp ( (char*)m_sLastKeyword.data(), (char*)m_sKeyword.data() ) < 0 ) );
					strncpy ( (char*)m_sLastKeyword.data(), (char*)m_sKeyword.data(), std::size ( m_sLastKeyword ) );
#endif
				} else
					tHit.m_uWordID += uDelta;

				// new word resets the row/position delta baselines
				tHit.m_tRowID = INVALID_ROWID;
				tHit.m_iWordPos = EMPTY_HIT;
				tHit.m_dFieldMask.UnsetAll();
				m_eState = BIN_DOC;
				break;

			case BIN_DOC:
				// doc id
				m_eState = BIN_POS;
				tHit.m_tRowID += uDelta;
				tHit.m_iWordPos = EMPTY_HIT;
				break;

			case BIN_POS:
				if ( m_eMode == SPH_HITLESS_ALL )
				{
					// hitless: the "position" is really a 32-bit field mask
					tHit.m_dFieldMask.Assign32 ( (DWORD)ReadVLB() );
					m_eState = BIN_DOC;

				} else if ( m_eMode == SPH_HITLESS_SOME )
				{
					// low bit flags "this word is hitless"; mask follows
					if ( uDelta & 1 )
					{
						tHit.m_dFieldMask.Assign32 ( (DWORD)ReadVLB() );
						m_eState = BIN_DOC;
					}
					uDelta >>= 1;
				}
				tHit.m_iWordPos += (DWORD)uDelta;
				*pOut = tHit;
				return 1;

			default:
				sphDie ( "INTERNAL ERROR: unknown bin state (state=%d)", m_eState );
			}
		} else
		{
			// zero delta = end of the current level: pos -> doc -> word -> EOB
			switch ( m_eState )
			{
			case BIN_POS: m_eState = BIN_DOC; break;
			case BIN_DOC: m_eState = BIN_WORD; break;
			case BIN_WORD:
				m_iDone = 1;
				pOut->m_uWordID = 0;
				return 1;
			default: sphDie ( "INTERNAL ERROR: unknown bin state (state=%d)", m_eState );
			}
		}
	}
}
/// true once the file region is exhausted (buffered bytes may still remain)
bool CSphBin::IsEOF() const
{
	return m_iDone != 0 || m_iFileLeft <= 0;
}
/// true once both the file region AND the in-memory buffer are exhausted
bool CSphBin::IsDone() const
{
	return m_iDone != 0 || ( m_iFileLeft <= 0 && m_iLeft <= 0 );
}
/// slurp the whole remaining file region into the buffer in one read;
/// fails if the tail does not fit alongside the bytes already buffered
ESphBinRead CSphBin::Precache()
{
	if ( m_iFileLeft > m_dBuffer.GetLength() - m_iLeft )
	{
		m_bError = true;
		return BIN_PRECACHE_ERROR;
	}

	if ( !m_iFileLeft )
		return BIN_PRECACHE_OK;

	// the fd is shared between bins: restore our position if another bin moved it
	if ( *m_pFilePos != m_iFilePos )
	{
		if ( !SeekAndWarn ( m_iFile, m_iFilePos, "CSphBin::Precache" ) )
		{
			m_bError = true;
			return BIN_PRECACHE_ERROR;
		}

		*m_pFilePos = m_iFilePos;
	}

	assert ( !m_dBuffer.IsEmpty() );

	// shift the unread tail to the buffer head, then read the rest of the region
	memmove ( m_dBuffer.begin(), m_pCurrent, m_iLeft );

	// FIX: capture the read size before zeroing m_iFileLeft. The previous code
	// did "m_iFileLeft -= m_iFileLeft" first and then "*m_pFilePos += m_iFileLeft",
	// adding zero and leaving the shared file-position tracker stale, which at
	// best forced a redundant seek and at worst desynced other bins on this fd.
	const int iToRead = m_iFileLeft;

	if ( sphReadThrottled ( m_iFile, &m_dBuffer[m_iLeft], iToRead ) != (size_t)iToRead )
	{
		m_bError = true;
		return BIN_READ_ERROR;
	}

	m_iLeft += iToRead;
	m_iFilePos += iToRead;
	m_iFileLeft = 0;
	m_pCurrent = m_dBuffer.begin();
	*m_pFilePos += iToRead;

	return BIN_PRECACHE_OK;
}
| 8,289
|
C++
|
.cpp
| 322
| 22.621118
| 122
| 0.617301
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,983
|
template_dict_traits.cpp
|
manticoresoftware_manticoresearch/src/dict/template_dict_traits.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org
//
#include "template_dict_traits.h"
#if WITH_STEMMER
#include <libstemmer.h>
#endif
#include "sphinxstem.h"
#include "word_forms.h"
#include "tokenizer/multiform_container.h"
#include "tokenizer/tokenizer.h"
#include "tokenizer/charset_definition_parser.h"
#include "sphinxint.h"
#include "sphinxjson.h"
#include "tokenizer/tok_internals.h"
CSphString g_sLemmatizerBase;
/// morphology
/// ids of the morphology processors; stored in m_dMorph and dispatched by
/// StemById(), so the aliasing and relative order of these values is a contract
enum class EMORPH : int {
	STEM_EN,            // built-in English stemmer
	STEM_RU_UTF8,       // built-in Russian stemmer (UTF-8)
	STEM_CZ,            // built-in Czech stemmer
	STEM_AR_UTF8,       // built-in Arabic stemmer (UTF-8)
	SOUNDEX,            // soundex phonetic folding
	METAPHONE_UTF8,     // double-metaphone phonetic folding
	// AOT lemmatizers, single best form; per-language ids are offsets from the base
	AOTLEMMER_BASE,
	AOTLEMMER_RU_UTF8 = AOTLEMMER_BASE,
	AOTLEMMER_EN,
	AOTLEMMER_DE_UTF8,
	AOTLEMMER_UK,
	// AOT lemmatizers, "all forms" variants (expansion handled outside StemById)
	AOTLEMMER_BASE_ALL,
	AOTLEMMER_RU_ALL = AOTLEMMER_BASE_ALL,
	AOTLEMMER_EN_ALL,
	AOTLEMMER_DE_ALL,
	AOTLEMMER_UK_ALL,
	// libstemmer algorithms get a dedicated id range; the offset from
	// LIBSTEMMER_FIRST indexes m_dStemmers
	LIBSTEMMER_FIRST,
	LIBSTEMMER_LAST = LIBSTEMMER_FIRST + 64
};
/// join a vector of strings into a single space-separated report line
void ConcatReportStrings ( const CSphTightVector<CSphString> & dStrings, CSphString & sReport )
{
	sReport = StrVec2Str ( dStrings, " " );
}
void ConcatReportStrings ( const CSphTightVector<CSphNormalForm> & dStrings, CSphString & sReport )
{
StringBuilder_c sTmp ( " " );
dStrings.Apply ( [&sTmp] ( const CSphNormalForm & tForms ) { sTmp << tForms.m_sForm; } );
sReport = CSphString ( sTmp );
}
/////////////////////////////////////////////////////////////////////////////

// process-wide cache of loaded wordform containers, shared across dictionaries
// (see GetWordformContainer / SweepWordformContainers)
CSphVector<CSphWordforms*> TemplateDictTraits_c::m_dWordformContainers;

TemplateDictTraits_c::TemplateDictTraits_c() = default;

TemplateDictTraits_c::~TemplateDictTraits_c()
{
#if WITH_STEMMER
	// libstemmer handles are per-dictionary; free them here
	for ( void* pStemmer : m_dStemmers )
		sb_stemmer_delete ( (sb_stemmer*)pStemmer );
#endif

	// wordform containers are shared; just drop our reference
	// (actual deletion happens in SweepWordformContainers)
	if ( m_pWordforms )
		--m_pWordforms->m_iRefCount;
}
/// map stopword ids to 0, pass all other ids through unchanged
/// m_pStopwords is kept sorted (built via Uniq), so a binary search applies
SphWordID_t TemplateDictTraits_c::FilterStopword ( SphWordID_t uID ) const
{
	if ( !m_iStopwords )
		return uID;

	// OPTIMIZE: binary search is not too good, could do some hashing instead
	int iLo = 0;
	int iHi = m_iStopwords - 1;
	while ( iLo <= iHi )
	{
		int iMid = iLo + ( iHi - iLo ) / 2;
		SphWordID_t uMid = m_pStopwords[iMid];
		if ( uMid == uID )
			return 0;
		if ( uMid < uID )
			iLo = iMid + 1;
		else
			iHi = iMid - 1;
	}
	return uID;
}
/// split a comma/space-separated morphology option string and register each
/// processor via InitMorph(); stops on the first hard error, otherwise
/// accumulates the worst status (ST_OK < ST_WARNING)
int TemplateDictTraits_c::ParseMorphology ( const char* sMorph, CSphString& sMessage )
{
	int iRes = ST_OK;
	const char* p = sMorph;
	while ( *p )
	{
		// skip delimiters between option names
		if ( sphIsSpace ( *p ) || *p == ',' )
		{
			++p;
			continue;
		}

		// scan one option name
		const char* pTokStart = p;
		while ( *p && !sphIsSpace ( *p ) && *p != ',' )
			++p;

		int iStatus = InitMorph ( pTokStart, int ( p - pTokStart ), sMessage );
		if ( iStatus == ST_ERROR )
			return ST_ERROR;
		if ( iStatus == ST_WARNING )
			iRes = ST_WARNING;
	}
	return iRes;
}
/// register a single morphology processor given by option name
/// returns ST_OK, ST_WARNING (unknown libstemmer algorithm; skipped),
/// or ST_ERROR (conflicting options, failed AOT dictionary load)
int TemplateDictTraits_c::InitMorph ( const char* szMorph, int iLength, CSphString& sMessage )
{
	if ( iLength == 0 )
		return ST_OK;

	if ( iLength == 4 && !strncmp ( szMorph, "none", iLength ) )
		return ST_OK;

	// built-in English stemmer; mutually exclusive with the English lemmatizers
	if ( iLength == 7 && !strncmp ( szMorph, "stem_en", iLength ) )
	{
		if ( m_dMorph.Contains ( (int)EMORPH::AOTLEMMER_EN ) )
		{
			sMessage.SetSprintf ( "stem_en and lemmatize_en clash" );
			return ST_ERROR;
		}

		if ( m_dMorph.Contains ( (int)EMORPH::AOTLEMMER_EN_ALL ) )
		{
			sMessage.SetSprintf ( "stem_en and lemmatize_en_all clash" );
			return ST_ERROR;
		}

		stem_en_init();
		return AddMorph ( (int)EMORPH::STEM_EN );
	}

	// built-in Russian stemmer; mutually exclusive with the Russian lemmatizers
	if ( iLength == 7 && !strncmp ( szMorph, "stem_ru", iLength ) )
	{
		if ( m_dMorph.Contains ( (int)EMORPH::AOTLEMMER_RU_UTF8 ) )
		{
			sMessage.SetSprintf ( "stem_ru and lemmatize_ru clash" );
			return ST_ERROR;
		}

		if ( m_dMorph.Contains ( (int)EMORPH::AOTLEMMER_RU_ALL ) )
		{
			sMessage.SetSprintf ( "stem_ru and lemmatize_ru_all clash" );
			return ST_ERROR;
		}

		stem_ru_init();
		return AddMorph ( (int)EMORPH::STEM_RU_UTF8 );
	}

	// AOT lemmatizers: "lemmatize_XX" (single best form) and "lemmatize_XX_all"
	for ( int j = 0; j < AOT_LENGTH; ++j )
	{
		char buf[20];
		char buf_all[20];
		snprintf ( buf, 19, "lemmatize_%s", AOT_LANGUAGES[j] ); // NOLINT
		snprintf ( buf_all, 19, "lemmatize_%s_all", AOT_LANGUAGES[j] ); // NOLINT
		buf[19] = '\0';
		buf_all[19] = '\0';

		if ( iLength == 12 && !strncmp ( szMorph, buf, iLength ) )
		{
			if ( j == AOT_RU && m_dMorph.Contains ( (int)EMORPH::STEM_RU_UTF8 ) )
			{
				sMessage.SetSprintf ( "stem_ru and lemmatize_ru clash" );
				return ST_ERROR;
			}

			if ( j == AOT_EN && m_dMorph.Contains ( (int)EMORPH::STEM_EN ) )
			{
				sMessage.SetSprintf ( "stem_en and lemmatize_en clash" );
				return ST_ERROR;
			}

			// no test for SPH_MORPH_STEM_DE since we don't have a German stemmer
			if ( m_dMorph.Contains ( static_cast<int> ( EMORPH::AOTLEMMER_BASE_ALL ) + j ) )
			{
				sMessage.SetSprintf ( "%s and %s clash", buf, buf_all );
				return ST_ERROR;
			}

			// load the AOT dictionary pack for this language
			auto sDictFile = SphSprintf ( "%s/%s.pak", g_sLemmatizerBase.cstr(), AOT_LANGUAGES[j] );
			if ( !sphAotInit ( sDictFile, sMessage, j ) )
				return ST_ERROR;

			// Ukrainian uses a dedicated lemmatizer instance
			if ( j == AOT_UK && !m_tLemmatizer )
				m_tLemmatizer = CreateLemmatizer ( j );

			// add manually instead of AddMorph(), because we need to update that fingerprint
			int iMorph;
			switch ( j )
			{
			case AOT_RU: iMorph = (int)EMORPH::AOTLEMMER_RU_UTF8; break;
			case AOT_DE: iMorph = (int)EMORPH::AOTLEMMER_DE_UTF8; break;
			case AOT_UK: iMorph = (int)EMORPH::AOTLEMMER_UK; break;
			default: iMorph = j + (int)EMORPH::AOTLEMMER_BASE;
			}

			if ( !m_dMorph.Contains ( iMorph ) )
			{
				// append "<dictname>:<crc>" to the morphology fingerprint
				if ( m_sMorphFingerprint.IsEmpty() )
					m_sMorphFingerprint.SetSprintf ( "%s:%08x", sphAotDictinfo ( j ).first.cstr(), sphAotDictinfo ( j ).second );
				else
					m_sMorphFingerprint.SetSprintf ( "%s;%s:%08x", m_sMorphFingerprint.cstr(), sphAotDictinfo ( j ).first.cstr(), sphAotDictinfo ( j ).second );
				m_dMorph.Add ( iMorph );
			}
			return ST_OK;
		}

		if ( iLength == 16 && !strncmp ( szMorph, buf_all, iLength ) )
		{
			if ( j == AOT_RU && ( m_dMorph.Contains ( (int)EMORPH::STEM_RU_UTF8 ) ) )
			{
				sMessage.SetSprintf ( "stem_ru and lemmatize_ru_all clash" );
				return ST_ERROR;
			}

			if ( m_dMorph.Contains ( (int)EMORPH::AOTLEMMER_BASE + j ) )
			{
				sMessage.SetSprintf ( "%s and %s clash", buf, buf_all );
				return ST_ERROR;
			}

			auto sDictFile = SphSprintf ( "%s/%s.pak", g_sLemmatizerBase.cstr(), AOT_LANGUAGES[j] );
			if ( !sphAotInit ( sDictFile, sMessage, j ) )
				return ST_ERROR;

			if ( j == AOT_UK && !m_tLemmatizer )
				m_tLemmatizer = CreateLemmatizer ( j );

			return AddMorph ( (int)EMORPH::AOTLEMMER_BASE_ALL + j );
		}
	}

	if ( iLength == 7 && !strncmp ( szMorph, "stem_cz", iLength ) )
	{
		stem_cz_init();
		return AddMorph ( (int)EMORPH::STEM_CZ );
	}

	if ( iLength == 7 && !strncmp ( szMorph, "stem_ar", iLength ) )
		return AddMorph ( (int)EMORPH::STEM_AR_UTF8 );

	// shorthand for English + Russian stemming
	if ( iLength == 9 && !strncmp ( szMorph, "stem_enru", iLength ) )
	{
		stem_en_init();
		stem_ru_init();
		AddMorph ( (int)EMORPH::STEM_EN );
		return AddMorph ( (int)EMORPH::STEM_RU_UTF8 );
	}

	if ( iLength == 7 && !strncmp ( szMorph, "soundex", iLength ) )
		return AddMorph ( (int)EMORPH::SOUNDEX );

	if ( iLength == 9 && !strncmp ( szMorph, "metaphone", iLength ) )
		return AddMorph ( (int)EMORPH::METAPHONE_UTF8 );

#if WITH_STEMMER
	// "libstemmer_<algo>": Snowball stemmers; sb_stemmer_new() validates the name
	const int LIBSTEMMER_LEN = 11;
	const int MAX_ALGO_LENGTH = 64;
	if ( iLength > LIBSTEMMER_LEN && iLength - LIBSTEMMER_LEN < MAX_ALGO_LENGTH && !strncmp ( szMorph, "libstemmer_", LIBSTEMMER_LEN ) )
	{
		CSphString sAlgo;
		sAlgo.SetBinary ( szMorph + LIBSTEMMER_LEN, iLength - LIBSTEMMER_LEN );

		sb_stemmer* pStemmer = nullptr;
		pStemmer = sb_stemmer_new ( sAlgo.cstr(), "UTF_8" );

		if ( !pStemmer )
		{
			sMessage.SetSprintf ( "unknown stemmer libstemmer_%s; skipped", sAlgo.cstr() );
			return ST_WARNING;
		}

		// NOTE(review): the morph id is registered BEFORE the duplicate scan below,
		// and sb_stemmer_new() just returned a fresh handle, so the pointer-equality
		// check looks unreachable; verify the intended dedup semantics upstream
		AddMorph ( (int)EMORPH::LIBSTEMMER_FIRST + m_dStemmers.GetLength() );
		for ( const auto* pStemmer_c: m_dStemmers )
			if ( pStemmer_c == pStemmer )
			{
				sb_stemmer_delete ( pStemmer );
				return ST_OK;
			}

		m_dStemmers.Add ( pStemmer );
		m_dDescStemmers.Add ( sAlgo );
		return ST_OK;
	}
#endif

	// chinese options are accepted here but add no dictionary-side morphology
	if ( iLength == 11 && !strncmp ( szMorph, "icu_chinese", iLength ) )
		return ST_OK;

	if ( iLength == 13 && !strncmp ( szMorph, "jieba_chinese", iLength ) )
		return ST_OK;

	sMessage.SetBinary ( szMorph, iLength );
	sMessage.SetSprintf ( "unknown stemmer %s", sMessage.cstr() );
	return ST_ERROR;
}
/// register a morphology processor id; duplicates are silently ignored
int TemplateDictTraits_c::AddMorph ( int iMorph )
{
	if ( m_dMorph.Contains ( iMorph ) )
		return ST_OK;
	m_dMorph.Add ( iMorph );
	return ST_OK;
}
/// normalize a word in place: wordforms first, then morphology, then
/// post-morphology wordforms
void TemplateDictTraits_c::ApplyStemmers ( BYTE* pWord ) const
{
	// a pre-morphology wordform hit replaces the word and short-circuits stemming
	if ( m_pWordforms && m_pWordforms->ToNormalForm ( pWord, true, m_bDisableWordforms ) )
		return;

	// run the morphology chain, stopping at the first processor that changed
	// the word; skipped entirely for words shorter than min_stemming_len
	const bool bLongEnough = ( m_tSettings.m_iMinStemmingLen <= 1 || sphUTF8Len ( (const char*)pWord ) >= m_tSettings.m_iMinStemmingLen );
	if ( bLongEnough )
	{
		ARRAY_FOREACH ( i, m_dMorph )
			if ( StemById ( pWord, m_dMorph[i] ) )
				break;
	}

	// post-morphology wordforms (the "~source > dest" kind)
	if ( m_pWordforms && m_pWordforms->m_bHavePostMorphNF )
		m_pWordforms->ToNormalForm ( pWord, false, m_bDisableWordforms );
}
/// multiform container lives inside the wordforms set, if one was loaded
const CSphMultiformContainer* TemplateDictTraits_c::GetMultiWordforms() const
{
	if ( !m_pWordforms )
		return nullptr;
	return m_pWordforms->m_pMultiWordforms.get();
}
/// FNV64 fingerprint of the dictionary settings; the hashing order below is
/// part of the fingerprint contract, do not reorder
uint64_t TemplateDictTraits_c::GetSettingsFNV() const
{
	// seed with the wordforms container identity (pointer doubles as an id)
	auto uHash = (uint64_t)m_pWordforms;

	if ( m_pStopwords )
		uHash = sphFNV64 ( m_pStopwords, m_iStopwords * sizeof ( *m_pStopwords ), uHash );

	uHash = sphFNV64 ( &m_tSettings.m_iMinStemmingLen, sizeof ( m_tSettings.m_iMinStemmingLen ), uHash );

	// pack boolean settings into a bitmask (bit 1 is unused here)
	DWORD uFlags = 0;
	if ( m_tSettings.m_bWordDict )
		uFlags |= 1 << 0;
	if ( m_tSettings.m_bStopwordsUnstemmed )
		uFlags |= 1 << 2;
	uHash = sphFNV64 ( &uFlags, sizeof ( uFlags ), uHash );

	// morphology chain, in registration order
	uHash = sphFNV64 ( m_dMorph.Begin(), m_dMorph.GetLength() * sizeof ( m_dMorph[0] ), uHash );

#if WITH_STEMMER
	for ( const CSphString& sDescStemmer : m_dDescStemmers )
		uHash = sphFNV64 ( sDescStemmer.cstr(), sDescStemmer.Length(), uHash );
#endif
	return uHash;
}
/// copy the shared dictionary state into a freshly allocated clone and wrap it
/// into a refcounted pointer; stateful helpers (libstemmer handles, the UK
/// lemmatizer) are recreated rather than shared
DictRefPtr_c TemplateDictTraits_c::CloneBase ( TemplateDictTraits_c* pDict ) const
{
	assert ( pDict );

	// plain state copies
	pDict->m_tSettings = m_tSettings;
	pDict->m_iStopwords = m_iStopwords;
	pDict->m_pStopwords = m_pStopwords;
	pDict->m_dSWFileInfos = m_dSWFileInfos;
	pDict->m_dWFFileInfos = m_dWFFileInfos;

	// the wordforms container is shared via refcount
	pDict->m_pWordforms = m_pWordforms;
	if ( m_pWordforms )
		m_pWordforms->m_iRefCount++;

	pDict->m_dMorph = m_dMorph;

#if WITH_STEMMER
	// recreate each libstemmer handle from its algorithm description
	assert ( m_dDescStemmers.GetLength() == m_dStemmers.GetLength() );
	pDict->m_dDescStemmers = m_dDescStemmers;
	for ( const CSphString & sAlgo : m_dDescStemmers )
	{
		pDict->m_dStemmers.Add ( sb_stemmer_new ( sAlgo.cstr(), "UTF_8" ) );
		assert ( pDict->m_dStemmers.Last() );
	}
#endif

	if ( m_tLemmatizer )
		pDict->m_tLemmatizer = CreateLemmatizer ( AOT_UK );

	return DictRefPtr_c { pDict };
}
/// true when this dictionary carries per-instance state (libstemmer handles
/// and/or a lemmatizer) and therefore can not be shared between threads as-is
bool TemplateDictTraits_c::HasState() const
{
#if WITH_STEMMER
	return ( m_dDescStemmers.GetLength() > 0 || m_tLemmatizer );
#else
	return ( (bool)m_tLemmatizer );
#endif
}
/// load stopwords from a comma/space-separated list of file names;
/// missing files fall back to a stripped-path lookup and then to the bundled
/// share dir; any failure produces a warning, never a hard error
void TemplateDictTraits_c::LoadStopwords ( const char * sFiles, FilenameBuilder_i * pFilenameBuilder, const TokenizerRefPtr_c & pTokenizer, bool bStripFile )
{
	assert ( !m_pStopwords );
	assert ( !m_iStopwords );

	// tokenize file list
	if ( !sFiles || !*sFiles )
		return;

	m_dSWFileInfos.Resize ( 0 );

	TokenizerRefPtr_c pTokenizerClone = pTokenizer->Clone ( SPH_CLONE_INDEX );

	// writable copy of the list for in-place splitting
	CSphFixedVector<char> dList ( 1 + (int)strlen ( sFiles ) );
	strcpy ( dList.Begin(), sFiles ); // NOLINT

	char* pCur = dList.Begin();
	char* sName = nullptr;

	CSphVector<SphWordID_t> dStop;

	while ( true )
	{
		// find next name start
		while ( *pCur && ( isspace ( *pCur ) || *pCur == ',' ) )
			pCur++;
		if ( !*pCur )
			break;
		sName = pCur;

		// find next name end
		while ( *pCur && !( isspace ( *pCur ) || *pCur == ',' ) )
			pCur++;
		if ( *pCur )
			*pCur++ = '\0';

		CSphString sFileName = sName;
		if ( pFilenameBuilder )
			sFileName = pFilenameBuilder->GetFullPath ( sFileName );

		// lookup chain: path as given -> stripped path -> bundled share dir
		bool bGotFile = sphIsReadable ( sFileName );
		if ( !bGotFile )
		{
			if ( bStripFile )
			{
				StripPath ( sFileName );
				bGotFile = sphIsReadable ( sFileName );
			}
			if ( !bGotFile )
			{
				if ( !bStripFile )
					StripPath ( sFileName );
				sFileName.SetSprintf ( "%s/stopwords/%s", GET_FULL_SHARE_DIR(), sFileName.cstr() );
				bGotFile = sphIsReadable ( sFileName );
			}
		}

		CSphFixedVector<BYTE> dBuffer ( 0 );
		CSphSavedFile tInfo;
		tInfo.Collect ( sFileName.cstr() );

		// need to store original name to compatible with original behavior of load order
		// from path defined; from tool CWD; from SHARE_DIR
		tInfo.m_sFilename = sName;
		m_dSWFileInfos.Add ( tInfo );

		if ( !bGotFile )
		{
			StringBuilder_c sError;
			sError.Appendf ( "failed to load stopwords from either '%s' or '%s'", sName, sFileName.cstr() );
			if ( bStripFile )
				sError += ", current work directory";
			sphWarn ( "%s", sError.cstr() );
			continue;
		}

		// open file
		FILE* fp = fopen ( sFileName.cstr(), "rb" );
		if ( !fp )
		{
			sphWarn ( "failed to load stopwords from '%s'", sFileName.cstr() );
			continue;
		}

		// size the read buffer from the file length
		struct_stat st = { 0 };
		if ( fstat ( fileno ( fp ), &st ) == 0 )
			dBuffer.Reset ( st.st_size );
		else
		{
			fclose ( fp );
			sphWarn ( "stopwords: failed to get file size for '%s'", sFileName.cstr() );
			continue;
		}

		// tokenize file
		int iLength = (int)fread ( dBuffer.Begin(), 1, (size_t)st.st_size, fp );

		// tokenize stopwords line by line to prevent exceptions to fold multiple lines
		sphSplitApply ( (const char *)dBuffer.Begin(), iLength, "\r\n", [&] ( const char * sLine, int iLineLen )
		{
			BYTE* pToken;
			pTokenizerClone->SetBuffer ( (const BYTE *)sLine, iLineLen );
			while ( ( pToken = pTokenizerClone->GetToken() ) != nullptr )
			{
				if ( m_tSettings.m_bStopwordsUnstemmed )
					dStop.Add ( GetWordIDNonStemmed ( pToken ) );
				else
					dStop.Add ( GetWordID ( pToken ) );
			}
		} );

		// close file
		fclose ( fp );
	}

	// sort and dedupe the collected word ids
	dStop.Uniq();

	// store IDs
	if ( dStop.GetLength() )
	{
		m_dStopwordContainer.Reset ( dStop.GetLength() );
		ARRAY_FOREACH ( i, dStop )
			m_dStopwordContainer[i] = dStop[i];

		m_iStopwords = m_dStopwordContainer.GetLength();
		m_pStopwords = m_dStopwordContainer.Begin();
	}
}
/// install a precomputed stopword id list (assumed ready to use as-is)
void TemplateDictTraits_c::LoadStopwords ( const CSphVector<SphWordID_t>& dStopwords )
{
	const int iCount = dStopwords.GetLength();
	m_dStopwordContainer.Reset ( iCount );
	for ( int i = 0; i < iCount; ++i )
		m_dStopwordContainer[i] = dStopwords[i];

	m_iStopwords = iCount;
	m_pStopwords = m_dStopwordContainer.Begin();
}
/// serialize stopwords: count first, then zipped word ids
void TemplateDictTraits_c::WriteStopwords ( Writer_i & tWriter ) const
{
	tWriter.PutDword ( (DWORD)m_iStopwords );
	for ( int iStop = 0; iStop < m_iStopwords; ++iStop )
		tWriter.ZipOffset ( m_pStopwords[iStop] );
}
/// dump stopword ids as a JSON "stopwords_list" array; emits nothing when empty
void TemplateDictTraits_c::WriteStopwords ( JsonEscapedBuilder& tOut ) const
{
	if ( !m_iStopwords )
		return;

	tOut.Named ( "stopwords_list" );
	auto tArrayScope = tOut.Array(); // RAII: closes the array on scope exit
	for ( int iStop = 0; iStop < m_iStopwords; ++iStop )
		tOut << cast2signed ( m_pStopwords[iStop] );
}
void TemplateDictTraits_c::SweepWordformContainers ( const CSphVector<CSphSavedFile>& dFiles )
{
for ( int i = 0; i < m_dWordformContainers.GetLength(); )
{
CSphWordforms* WC = m_dWordformContainers[i];
if ( WC->m_iRefCount == 0 && !WC->IsEqual ( dFiles ) )
{
delete WC;
m_dWordformContainers.Remove ( i );
} else
++i;
}
}
/// find a cached wordform container for this file set + tokenizer, or load a
/// fresh one; a file-set match with a different tokenizer only warns
CSphWordforms* TemplateDictTraits_c::GetWordformContainer ( const CSphVector<CSphSavedFile>& dFileInfos, const StrVec_t* pEmbedded, const TokenizerRefPtr_c& pTokenizer, const char* szIndex )
{
	uint64_t uTokenizerFNV = pTokenizer->GetSettingsFNV();
	for ( CSphWordforms* pCached : m_dWordformContainers )
	{
		if ( !pCached->IsEqual ( dFileInfos ) )
			continue;

		if ( uTokenizerFNV == pCached->m_uTokenizerFNV )
			return pCached;

		// same files but different tokenizer settings: warn and keep scanning
		CSphTightVector<CSphString> dNames;
		for ( const auto& tFileInfo : dFileInfos )
			dNames.Add ( tFileInfo.m_sFilename );
		CSphString sAllFiles;
		ConcatReportStrings ( dNames, sAllFiles );
		sphWarning ( "table '%s': wordforms file '%s' is shared with table '%s', but tokenizer settings are different", szIndex, sAllFiles.cstr(), pCached->m_sIndexName.cstr() );
	}

	CSphWordforms* pNew = LoadWordformContainer ( dFileInfos, pEmbedded, pTokenizer, szIndex );
	if ( pNew )
		m_dWordformContainers.Add ( pNew );
	return pNew;
}
/// parse one wordforms line and add the mapping(s) to the container
/// line shape: "[~]token(s) > form(s)" (or "=>"); '#' starts a comment;
/// iFileId < 0 means the line comes from embedded index data (no sorting)
void TemplateDictTraits_c::AddWordform ( CSphWordforms* pContainer, char* sBuffer, int iLen, const TokenizerRefPtr_c& pTokenizer, const char* szFile, const CSphVector<int>& dBlended, int iFileId )
{
	StrVec_t dTokens;

	bool bSeparatorFound = false;
	bool bAfterMorphology = false;

	// parse the line
	pTokenizer->SetBuffer ( (BYTE*)sBuffer, iLen );

	bool bFirstToken = true;
	bool bStopwordsPresent = false;
	bool bCommentedWholeLine = false;

	// collect source tokens up to the '>' / '=>' separator
	BYTE* pFrom = nullptr;
	while ( ( pFrom = pTokenizer->GetTokenEscaped() ) != nullptr )
	{
		if ( *pFrom == '#' )
		{
			// '#' opens a comment; remember whether the whole line was commented out
			bCommentedWholeLine = bFirstToken;
			break;
		}

		// a leading unescaped '~' marks a post-morphology mapping
		if ( *pFrom == '~' && bFirstToken && *pTokenizer->GetTokenStart()!='\\' )
		{
			bAfterMorphology = true;
			bFirstToken = false;
			continue;
		}

		bFirstToken = false;

		// single token could also be escaped regular and not the delimiter
		if ( *pFrom == '>' )
		{
			// GetTokenStart is not the same as the token itself and could point to escape sequence
			if ( *pTokenizer->GetTokenStart()!='\\' )
			{
				bSeparatorFound = true;
				break;
			}
		}

		// token could also be escaped regular and not the delimiter
		if ( *pFrom == '=' && *pTokenizer->GetBufferPtr() == '>' )
		{
			if ( *pTokenizer->GetTokenStart()!='\\' )
			{
				pTokenizer->GetToken();
				bSeparatorFound = true;
				break;
			}
		}

		// stopwords (GetWordID()==0) are dropped from the source side
		if ( GetWordID ( pFrom, (int)strlen ( (const char*)pFrom ), true ) )
			dTokens.Add ( (const char*)pFrom );
		else
			bStopwordsPresent = true;
	}

	if ( !dTokens.GetLength() )
	{
		if ( !bCommentedWholeLine )
			sphWarning ( "table '%s': all wordform source tokens are stopwords (wordform='%s', file='%s'). IGNORED.", pContainer->m_sIndexName.cstr(), sBuffer, szFile );
		return;
	}

	if ( !bSeparatorFound )
	{
		sphWarning ( "table '%s': no wordform separator found (wordform='%s', file='%s'). IGNORED.", pContainer->m_sIndexName.cstr(), sBuffer, szFile );
		return;
	}

	BYTE* pTo = pTokenizer->GetToken();
	if ( !pTo )
	{
		sphWarning ( "table '%s': no destination token found (wordform='%s', file='%s'). IGNORED.", pContainer->m_sIndexName.cstr(), sBuffer, szFile );
		return;
	}

	if ( *pTo == '#' )
	{
		sphWarning ( "table '%s': misplaced comment (wordform='%s', file='%s'). IGNORED.", pContainer->m_sIndexName.cstr(), sBuffer, szFile );
		return;
	}

	// collect destination tokens; the first one is kept even if it is a
	// stopword (it may turn out to be the only one)
	CSphVector<CSphNormalForm> dDestTokens;
	bool bFirstDestIsStop = !GetWordID ( pTo, (int)strlen ( (const char*)pTo ), true );
	CSphNormalForm& tForm = dDestTokens.Add();
	tForm.m_sForm = (const char*)pTo;
	tForm.m_iLengthCP = pTokenizer->GetLastTokenLen();

	// what if we have more than one word in the right part?
	const BYTE* pDestToken;
	while ( ( pDestToken = pTokenizer->GetToken() ) != nullptr )
	{
		bool bStop = ( !GetWordID ( pDestToken, (int)strlen ( (const char*)pDestToken ), true ) );
		if ( !bStop )
		{
			CSphNormalForm& tNewForm = dDestTokens.Add();
			tNewForm.m_sForm = (const char*)pDestToken;
			tNewForm.m_iLengthCP = pTokenizer->GetLastTokenLen();
		}

		bStopwordsPresent |= bStop;
	}

	// we can have wordforms with 1 destination token that is a stopword
	if ( dDestTokens.GetLength() > 1 && bFirstDestIsStop )
		dDestTokens.Remove ( 0 );

	if ( !dDestTokens.GetLength() )
	{
		sphWarning ( "table '%s': destination token is a stopword (wordform='%s', file='%s'). IGNORED.", pContainer->m_sIndexName.cstr(), sBuffer, szFile );
		return;
	}

	if ( bStopwordsPresent )
		sphWarning ( "table '%s': wordform contains stopwords (wordform='%s'). Fix your wordforms file '%s'.", pContainer->m_sIndexName.cstr(), sBuffer, szFile );

	// we disabled all blended, so we need to filter them manually
	bool bBlendedPresent = false;
	if ( dBlended.GetLength() )
		for ( const auto& tDestToken : dDestTokens )
		{
			int iCode;
			const BYTE* pBuf = (const BYTE*)tDestToken.m_sForm.cstr();
			while ( ( iCode = sphUTF8Decode ( pBuf ) ) > 0 && !bBlendedPresent )
				bBlendedPresent = ( dBlended.BinarySearch ( iCode ) != nullptr );
		}

	if ( bBlendedPresent )
		sphWarning ( "invalid mapping (destination contains blended characters) (wordform='%s'). Fix your wordforms file '%s'.", sBuffer, szFile );

	if ( bBlendedPresent && dDestTokens.GetLength() > 1 )
	{
		sphWarning ( "blended characters are not allowed with multiple destination tokens (wordform='%s', file='%s'). IGNORED.", sBuffer, szFile );
		return;
	}

	// multiform case: several source tokens and/or several destination tokens
	if ( dTokens.GetLength() > 1 || dDestTokens.GetLength() > 1 )
	{
		auto pMultiWordform = std::make_unique<CSphMultiform>();
		pMultiWordform->m_iFileId = iFileId;
		pMultiWordform->m_dNormalForm.Resize ( dDestTokens.GetLength() );
		ARRAY_FOREACH ( i, dDestTokens )
			pMultiWordform->m_dNormalForm[i] = dDestTokens[i];

		// the first source token is the hash key; the rest go into the form itself
		for ( int i = 1; i < dTokens.GetLength(); ++i )
			pMultiWordform->m_dTokens.Add ( dTokens[i] );

		if ( !pContainer->m_pMultiWordforms )
			pContainer->m_pMultiWordforms = std::make_unique<CSphMultiformContainer>();

		CSphMultiforms** ppWordforms = pContainer->m_pMultiWordforms->m_Hash ( dTokens[0] );
		if ( ppWordforms )
		{
			auto* pWordforms = *ppWordforms;

			// a duplicate (same source token list) overrides the stored destination
			for ( const auto& pStoredMF : pWordforms->m_pForms )
			{
				if ( pStoredMF->m_dTokens.GetLength() == pMultiWordform->m_dTokens.GetLength() )
				{
					bool bSameTokens = true;
					ARRAY_FOREACH_COND ( iToken, pStoredMF->m_dTokens, bSameTokens )
						if ( pStoredMF->m_dTokens[iToken] != pMultiWordform->m_dTokens[iToken] )
							bSameTokens = false;

					if ( bSameTokens )
					{
						CSphString sStoredTokens, sStoredForms;
						ConcatReportStrings ( pStoredMF->m_dTokens, sStoredTokens );
						ConcatReportStrings ( pStoredMF->m_dNormalForm, sStoredForms );
						sphWarning ( "table '%s': duplicate wordform found - overridden ( current='%s', old='%s %s > %s' ). Fix your wordforms file '%s'.",
							pContainer->m_sIndexName.cstr(),
							sBuffer,
							dTokens[0].cstr(),
							sStoredTokens.cstr(),
							sStoredForms.cstr(),
							szFile );

						pStoredMF->m_dNormalForm.Resize ( pMultiWordform->m_dNormalForm.GetLength() );
						ARRAY_FOREACH ( iForm, pMultiWordform->m_dNormalForm )
							pStoredMF->m_dNormalForm[iForm] = pMultiWordform->m_dNormalForm[iForm];

						pStoredMF->m_iFileId = iFileId;

						pMultiWordform.reset();
						break; // otherwise, we crash next turn
					}
				}
			}

			if ( pMultiWordform )
			{
				// new form under an existing key: keep token-count bounds current
				pWordforms->m_iMinTokens = Min ( pWordforms->m_iMinTokens, pMultiWordform->m_dTokens.GetLength() );
				pWordforms->m_iMaxTokens = Max ( pWordforms->m_iMaxTokens, pMultiWordform->m_dTokens.GetLength() );
				pWordforms->m_pForms.Add ( pMultiWordform.release() );

				// sort forms by files and length
				// but do not sort if we're loading embedded
				if ( iFileId >= 0 )
					pWordforms->m_pForms.Sort ( Lesser ( [] ( const CSphMultiform* pA, const CSphMultiform* pB ) noexcept {
						assert ( pA && pB );
						return ( pA->m_iFileId == pB->m_iFileId ) ? pA->m_dTokens.GetLength() > pB->m_dTokens.GetLength() : pA->m_iFileId > pB->m_iFileId;
					} ) );

				pContainer->m_pMultiWordforms->m_iMaxTokens = Max ( pContainer->m_pMultiWordforms->m_iMaxTokens, pWordforms->m_iMaxTokens );
			}
		} else
		{
			// first form under this key
			auto pNewWordforms = std::make_unique<CSphMultiforms>();
			pNewWordforms->m_iMinTokens = pMultiWordform->m_dTokens.GetLength();
			pNewWordforms->m_iMaxTokens = pMultiWordform->m_dTokens.GetLength();
			pNewWordforms->m_pForms.Add ( pMultiWordform.release() );
			pContainer->m_pMultiWordforms->m_iMaxTokens = Max ( pContainer->m_pMultiWordforms->m_iMaxTokens, pNewWordforms->m_iMaxTokens );
			pContainer->m_pMultiWordforms->m_Hash.Add ( pNewWordforms.release(), dTokens[0] );
		}

		// let's add destination form to regular wordform to keep destination from being stemmed
		// FIXME!!! handle multiple destination tokens and ~flag for wordforms
		if ( !bAfterMorphology && dDestTokens.GetLength() == 1 && !pContainer->m_hHash.Exists ( dDestTokens[0].m_sForm ) )
		{
			CSphStoredNF tStoredForm;
			tStoredForm.m_sWord = dDestTokens[0].m_sForm;
			tStoredForm.m_bAfterMorphology = bAfterMorphology;
			pContainer->m_bHavePostMorphNF |= bAfterMorphology;
			if ( !pContainer->m_dNormalForms.GetLength()
				|| pContainer->m_dNormalForms.Last().m_sWord != dDestTokens[0].m_sForm
				|| pContainer->m_dNormalForms.Last().m_bAfterMorphology != bAfterMorphology )
				pContainer->m_dNormalForms.Add ( tStoredForm );

			pContainer->m_hHash.Add ( pContainer->m_dNormalForms.GetLength() - 1, dDestTokens[0].m_sForm );
		}
	} else
	{
		// simple one-token -> one-token mapping
		if ( bAfterMorphology )
		{
			// post-morphology source: stem it now so lookups match stemmed words
			BYTE pBuf[16 + 3 * SPH_MAX_WORD_LEN];
			memcpy ( pBuf, dTokens[0].cstr(), dTokens[0].Length() + 1 );
			ApplyStemmers ( pBuf );
			dTokens[0] = (char*)pBuf;
		}

		// check wordform that source token is a new token or has same destination token
		int* pRefTo = pContainer->m_hHash ( dTokens[0] );
		assert ( !pRefTo || ( *pRefTo >= 0 && *pRefTo < pContainer->m_dNormalForms.GetLength() ) );
		if ( pRefTo )
		{
			// replace with a new wordform
			if ( pContainer->m_dNormalForms[*pRefTo].m_sWord != dDestTokens[0].m_sForm || pContainer->m_dNormalForms[*pRefTo].m_bAfterMorphology != bAfterMorphology )
			{
				CSphStoredNF& tRefTo = pContainer->m_dNormalForms[*pRefTo];
				sphWarning ( "table '%s': duplicate wordform found - overridden ( current='%s', old='%s%s > %s' ). Fix your wordforms file '%s'.",
					pContainer->m_sIndexName.cstr(),
					sBuffer,
					tRefTo.m_bAfterMorphology ? "~" : "",
					dTokens[0].cstr(),
					tRefTo.m_sWord.cstr(),
					szFile );

				tRefTo.m_sWord = dDestTokens[0].m_sForm;
				tRefTo.m_bAfterMorphology = bAfterMorphology;
				pContainer->m_bHavePostMorphNF |= bAfterMorphology;
			} else
				sphWarning ( "table '%s': duplicate wordform found ( '%s' ). Fix your wordforms file '%s'.", pContainer->m_sIndexName.cstr(), sBuffer, szFile );
		} else
		{
			CSphStoredNF tStoredForm;
			tStoredForm.m_sWord = dDestTokens[0].m_sForm;
			tStoredForm.m_bAfterMorphology = bAfterMorphology;
			pContainer->m_bHavePostMorphNF |= bAfterMorphology;
			// reuse the last stored normal form when it is byte-identical
			if ( !pContainer->m_dNormalForms.GetLength()
				|| pContainer->m_dNormalForms.Last().m_sWord != dDestTokens[0].m_sForm
				|| pContainer->m_dNormalForms.Last().m_bAfterMorphology != bAfterMorphology )
				pContainer->m_dNormalForms.Add ( tStoredForm );

			pContainer->m_hHash.Add ( pContainer->m_dNormalForms.GetLength() - 1, dTokens[0] );
		}
	}
}
/// build a wordform container from embedded index data or from the given files;
/// returns nullptr if any source file fails to open
CSphWordforms* TemplateDictTraits_c::LoadWordformContainer ( const CSphVector<CSphSavedFile>& dFileInfos, const StrVec_t* pEmbeddedWordforms, const TokenizerRefPtr_c& pTokenizer, const char* szIndex )
{
	// allocate it
	auto pContainer = std::make_unique<CSphWordforms>();
	pContainer->m_dFiles = dFileInfos;
	pContainer->m_uTokenizerFNV = pTokenizer->GetSettingsFNV();
	pContainer->m_sIndexName = szIndex;

	TokenizerRefPtr_c pMyTokenizer = pTokenizer->Clone ( SPH_CLONE_INDEX );
	const CSphTokenizerSettings& tSettings = pMyTokenizer->GetSettings();
	CSphVector<int> dBlended;

	// get a list of blend chars and set add them to the tokenizer as simple chars
	// (blend chars are disabled below; AddWordform filters them manually)
	if ( tSettings.m_sBlendChars.Length() )
	{
		StringBuilder_c sNewCharset;
		sNewCharset << tSettings.m_sCaseFolding;

		CSphVector<CSphRemapRange> dRemaps;
		if ( sphParseCharset ( tSettings.m_sBlendChars.cstr(), dRemaps ) )
			for ( const auto& dRemap : dRemaps )
				for ( int j = dRemap.m_iStart; j <= dRemap.m_iEnd; ++j )
				{
					// NOTE(review): (char)j narrows the codepoint; assumes blend
					// chars here are single-byte — verify for wide ranges
					sNewCharset << ", " << (char)j;
					dBlended.Add ( j );
				}

		// sort dBlended for binary search
		dBlended.Sort();

		CSphString sError;
		pMyTokenizer->SetCaseFolding ( sNewCharset.cstr(), sError );

		// disable blend chars
		pMyTokenizer->SetBlendChars ( nullptr, sError );
	}

	// add wordform-specific specials
	pMyTokenizer->AddSpecials ( "#=>~" );

	if ( pEmbeddedWordforms )
	{
		// embedded forms: the file list is only used for warning messages
		CSphTightVector<CSphString> dFilenames;
		dFilenames.Resize ( dFileInfos.GetLength() );
		ARRAY_FOREACH ( i, dFileInfos )
			dFilenames[i] = dFileInfos[i].m_sFilename;

		CSphString sAllFiles;
		ConcatReportStrings ( dFilenames, sAllFiles );

		// iFileId=-1 marks embedded data (AddWordform skips sorting for it)
		for ( auto& sWordForm : ( *pEmbeddedWordforms ) )
			AddWordform ( pContainer.get(), const_cast<char*> ( sWordForm.cstr() ), sWordForm.Length(), pMyTokenizer, sAllFiles.cstr(), dBlended, -1 );
	} else
	{
		char sBuffer[6 * SPH_MAX_WORD_LEN + 512]; // enough to hold 2 UTF-8 words, plus some whitespace overhead

		ARRAY_FOREACH ( i, dFileInfos )
		{
			CSphAutoreader rdWordforms;
			const char* szFile = dFileInfos[i].m_sFilename.cstr();
			CSphString sError;
			if ( !rdWordforms.Open ( szFile, sError ) )
			{
				sphWarning ( "table '%s': %s", szIndex, sError.cstr() );
				return nullptr;
			}

			int iLen;
			while ( ( iLen = rdWordforms.GetLine ( sBuffer, sizeof ( sBuffer ) ) ) >= 0 )
				AddWordform ( pContainer.get(), sBuffer, iLen, pMyTokenizer, szFile, dBlended, i );
		}
	}

	return pContainer.release();
}
bool TemplateDictTraits_c::LoadWordforms ( const StrVec_t& dFiles, const CSphEmbeddedFiles* pEmbedded, const TokenizerRefPtr_c& pTokenizer, const char* szIndex )
{
if ( pEmbedded )
{
m_dWFFileInfos.Resize ( pEmbedded->m_dWordformFiles.GetLength() );
ARRAY_FOREACH ( i, m_dWFFileInfos )
m_dWFFileInfos[i] = pEmbedded->m_dWordformFiles[i];
} else
{
m_dWFFileInfos.Reserve ( dFiles.GetLength() );
CSphSavedFile tFile;
for ( const auto& sFile : dFiles )
if ( !sFile.IsEmpty() )
{
if ( tFile.Collect ( sFile.cstr() ) )
m_dWFFileInfos.Add ( tFile );
else
sphWarning ( "table '%s': wordforms file '%s' not found", szIndex, sFile.cstr() );
}
}
if ( !m_dWFFileInfos.GetLength() )
return false;
SweepWordformContainers ( m_dWFFileInfos );
m_pWordforms = GetWordformContainer ( m_dWFFileInfos, pEmbedded ? &( pEmbedded->m_dWordforms ) : nullptr, pTokenizer, szIndex );
if ( m_pWordforms )
{
++m_pWordforms->m_iRefCount;
if ( m_pWordforms->m_bHavePostMorphNF && !m_dMorph.GetLength() )
sphWarning ( "table '%s': wordforms contain post-morphology normal forms, but no morphology was specified", szIndex );
}
return !!m_pWordforms;
}
/// serialize wordforms: entry count, then one text line per mapping
/// ("[~]source > dest" for plain forms, "first rest... > forms..." for multi)
void TemplateDictTraits_c::WriteWordforms ( Writer_i & tWriter ) const
{
	// no wordforms: store a zero count and bail out
	if ( !m_pWordforms )
	{
		tWriter.PutDword ( 0 );
		return;
	}

	// total entries = plain mappings + every multiform under every key
	int iMultiCount = 0;
	if ( m_pWordforms->m_pMultiWordforms )
		for ( const auto& tEntry : m_pWordforms->m_pMultiWordforms->m_Hash )
			iMultiCount += tEntry.second ? tEntry.second->m_pForms.GetLength() : 0;

	tWriter.PutDword ( m_pWordforms->m_hHash.GetLength() + iMultiCount );

	GtEscapedBuilder sLine;

	// plain mappings; '~' marks post-morphology forms
	for ( const auto & tMapping : m_pWordforms->m_hHash )
	{
		const CSphStoredNF & tNF = m_pWordforms->m_dNormalForms[tMapping.second];
		sLine.Clear();
		if ( tNF.m_bAfterMorphology )
			sLine << "~";
		sLine.AppendEscapedWithCommaNoQuotes ( tMapping.first.cstr() );
		sLine.Appendf ( " > %s", tNF.m_sWord.cstr() );
		tWriter.PutString ( sLine.cstr() );
	}

	if ( !m_pWordforms->m_pMultiWordforms )
		return;

	// multiform mappings
	for ( const auto & tEntry : m_pWordforms->m_pMultiWordforms->m_Hash )
	{
		CSphMultiforms * pForms = tEntry.second;
		if ( !pForms )
			continue;

		for ( const CSphMultiform * pMF : pForms->m_pForms )
		{
			CSphString sTokens, sForms;
			ConcatReportStrings ( pMF->m_dTokens, sTokens );
			ConcatReportStrings ( pMF->m_dNormalForm, sForms );

			sLine.Clear();
			sLine.AppendEscapedWithCommaNoQuotes ( tEntry.first.cstr() );
			sLine << " ";
			sLine.AppendEscapedWithCommaNoQuotes ( sTokens.cstr() );
			sLine.Appendf ( " > %s", sForms.cstr() );
			tWriter.PutString ( sLine.cstr() );
		}
	}
}
/// dump wordforms as a JSON "word_forms" array; emits nothing when empty
void TemplateDictTraits_c::WriteWordforms ( JsonEscapedBuilder& tOut ) const
{
	if ( !m_pWordforms )
		return;

	// check both plain and multiform mappings before opening the array
	bool bHaveData = ( m_pWordforms->m_hHash.GetLength() != 0 );
	using HASHIT = std::pair<CSphString, CSphMultiforms*>;
	auto& pMulti = m_pWordforms->m_pMultiWordforms; // shortcut
	if ( pMulti )
		bHaveData |= ::any_of ( pMulti->m_Hash, [] ( const HASHIT& tMF ) { return tMF.second && !tMF.second->m_pForms.IsEmpty(); } );

	if ( !bHaveData )
		return;

	tOut.Named ( "word_forms" );
	auto _ = tOut.ArrayW(); // RAII: closes the array on scope exit
	GtEscapedBuilder sLine;

	// plain mappings: "[~]source > dest" ('~' marks post-morphology forms)
	if ( m_pWordforms->m_hHash.GetLength() )
	{
		for ( const auto & tForm : m_pWordforms->m_hHash )
		{
			sLine.Clear();
			if ( m_pWordforms->m_dNormalForms[tForm.second].m_bAfterMorphology )
				sLine << "~";
			sLine.AppendEscapedWithCommaNoQuotes ( tForm.first.cstr() );
			sLine.Appendf ( " > %s", m_pWordforms->m_dNormalForms[tForm.second].m_sWord.cstr() );
			tOut.FixupSpacedAndAppendEscaped ( sLine.cstr() );
		}
	}

	if ( !pMulti )
		return;

	// multiform mappings: "first rest... > forms..."
	for ( const HASHIT & tForms : pMulti->m_Hash )
	{
		if ( !tForms.second )
			continue;

		for ( const CSphMultiform * pMF : tForms.second->m_pForms )
		{
			CSphString sTokens, sForms;
			ConcatReportStrings ( pMF->m_dTokens, sTokens );
			ConcatReportStrings ( pMF->m_dNormalForm, sForms );

			sLine.Clear();
			sLine.AppendEscapedWithCommaNoQuotes ( tForms.first.cstr() );
			sLine << " ";
			sLine.AppendEscapedWithCommaNoQuotes ( sTokens.cstr() );
			sLine.Appendf ( " > %s", sForms.cstr() );
			tOut.FixupSpacedAndAppendEscaped ( sLine.cstr() );
		}
	}
}
/// replace the morphology chain with the one described by szMorph
/// (case-insensitive, comma/space-separated option list); frees any previously
/// created libstemmer handles; returns ST_OK / ST_WARNING / ST_ERROR
int TemplateDictTraits_c::SetMorphology ( const char* szMorph, CSphString& sMessage )
{
	// drop the old chain and its stateful libstemmer handles
	m_dMorph.Reset();
#if WITH_STEMMER
	for ( void* pStemmer : m_dStemmers )
		sb_stemmer_delete ( (sb_stemmer*)pStemmer );
	m_dStemmers.Reset();
#endif

	if ( !szMorph )
		return ST_OK;

	// option names are matched lowercase
	CSphString sOption = szMorph;
	sOption.ToLower();

	// FIX: removed unused local "CSphString sError;" (was never read or written)
	int iRes = ParseMorphology ( sOption.cstr(), sMessage );
	if ( iRes == ST_WARNING && sMessage.IsEmpty() )
		sMessage.SetSprintf ( "invalid morphology option %s; skipped", sOption.cstr() );
	return iRes;
}
/// true when at least one morphology processor is configured
bool TemplateDictTraits_c::HasMorphology() const
{
	return m_dMorph.GetLength() != 0;
}
/// common id-based stemmer
/// applies the stemmer/lemmatizer identified by iStemmer to pWord in place
/// @return true if the word was actually changed by stemming
bool TemplateDictTraits_c::StemById ( BYTE* pWord, int iStemmer ) const
{
	char szBuf[MAX_KEYWORD_BYTES];

	// safe quick strncpy without (!) padding and with a side of strlen
	char* p = szBuf;
	char* pMax = szBuf + sizeof ( szBuf ) - 1;
	BYTE* pLastSBS = nullptr; // last single-byte (ASCII, <0x80) char seen, used by the RU stemmer below
	while ( *pWord && p < pMax )
	{
		pLastSBS = ( *pWord ) < 0x80 ? pWord : pLastSBS;
		*p++ = *pWord++;
	}
	int iLen = int ( p - szBuf );
	*p = '\0';
	pWord -= iLen; // rewind to the start; szBuf now holds the original for the final comparison

	switch ( (EMORPH)iStemmer )
	{
	case EMORPH::STEM_EN:
		stem_en ( pWord, iLen );
		break;

	case EMORPH::STEM_RU_UTF8:
		// skip stemming in case of SBC at the end of the word
		if ( pLastSBS && ( pLastSBS - pWord + 1 ) >= iLen )
			break;

		// stem only UTF8 tail
		if ( !pLastSBS )
		{
			stem_ru_utf8 ( (WORD*)pWord );
		} else
		{
			stem_ru_utf8 ( (WORD*)( pLastSBS + 1 ) );
		}
		break;

	case EMORPH::STEM_CZ:
		stem_cz ( pWord );
		break;

	case EMORPH::STEM_AR_UTF8:
		stem_ar_utf8 ( pWord );
		break;

	case EMORPH::SOUNDEX:
		stem_soundex ( pWord );
		break;

	case EMORPH::METAPHONE_UTF8:
		stem_dmetaphone ( pWord );
		break;

	case EMORPH::AOTLEMMER_RU_UTF8:
		sphAotLemmatizeRuUTF8 ( pWord );
		break;

	case EMORPH::AOTLEMMER_EN:
		sphAotLemmatize ( pWord, AOT_EN );
		break;

	case EMORPH::AOTLEMMER_DE_UTF8:
		sphAotLemmatizeDeUTF8 ( pWord );
		break;

	case EMORPH::AOTLEMMER_UK:
		sphAotLemmatizeUk ( pWord, m_tLemmatizer.get() );
		break;

	case EMORPH::AOTLEMMER_RU_ALL:
	case EMORPH::AOTLEMMER_EN_ALL:
	case EMORPH::AOTLEMMER_DE_ALL:
	case EMORPH::AOTLEMMER_UK_ALL:
		// do the real work somewhere else
		// this is mostly for warning suppressing and making some features like
		// index_exact_words=1 vs expand_keywords=1 work
		break;

	default:
#if WITH_STEMMER
		// libstemmer (Snowball) algorithms occupy a dedicated id range
		if ( iStemmer >= (int)EMORPH::LIBSTEMMER_FIRST && iStemmer < (int)EMORPH::LIBSTEMMER_LAST )
		{
			auto* pStemmer = (sb_stemmer*)m_dStemmers[iStemmer - (int)EMORPH::LIBSTEMMER_FIRST];
			assert ( pStemmer );

			const sb_symbol* sStemmed = sb_stemmer_stem ( pStemmer, (sb_symbol*)pWord, (int)strlen ( (const char*)pWord ) );
			int iStemmedLen = sb_stemmer_length ( pStemmer );

			memcpy ( pWord, sStemmed, iStemmedLen );
			pWord[iStemmedLen] = '\0';
		} else
			return false;
		break;
#else
		return false;
#endif
	}

	// changed iff the stemmed word differs from the saved original
	return strcmp ( (char*)pWord, szBuf ) != 0;
}
void sphShutdownWordforms()
{
CSphVector<CSphSavedFile> dEmptyFiles;
TemplateDictTraits_c::SweepWordformContainers ( dEmptyFiles );
}
/// point the lemmatizer dictionary base path at the install's share directory
void SetupLemmatizerBase()
{
	g_sLemmatizerBase = GET_FULL_SHARE_DIR();
}
| 37,010
|
C++
|
.cpp
| 1,056
| 31.875947
| 200
| 0.684701
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,984
|
dict_base.cpp
|
manticoresoftware_manticoresearch/src/dict/dict_base.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org
//
#include "dict_base.h"
#include "fileutils.h"
#include "sphinx.h"
#include "tokenizer/tokenizer.h"
// default no-op implementations for dicts that do not build an on-disk dictionary
void CSphDict::DictBegin ( CSphAutofile&, CSphAutofile&, int ) {}
void CSphDict::SortedDictBegin ( CSphAutofile&, int, int ) {}
void CSphDict::DictEntry ( const DictEntry_t& ) {}
void CSphDict::DictEndEntries ( SphOffset_t ) {}
// base implementation has nothing to finalize; always reports success
bool CSphDict::DictEnd ( DictHeader_t*, int, CSphString& )
{
	return true;
}
bool CSphDict::DictIsError() const
{
	// NOTE(review): the base stub reports 'error' unconditionally, presumably so that
	// dict-writing paths are only usable on derived classes that override this - confirm
	return true;
}
/// return a dictionary that is safe to share between sessions:
/// stateful dicts are cloned, stateless ones are returned as-is
DictRefPtr_c GetStatelessDict ( const DictRefPtr_c& pDict )
{
	if ( !pDict )
		return nullptr;
	return pDict->HasState() ? pDict->Clone() : pDict;
}
///////////////////////////////////////////////////////////////////////
/// apply settings, morphology, stopwords and wordforms to a freshly created dictionary
/// @param pDict            in/out; reset to nullptr if morphology setup fails
/// @param pFiles           optional embedded stopword/wordform data (takes precedence over files)
/// @param pFilenameBuilder optional path resolver for stopword/wordform filenames
/// @param sError           receives the morphology error description on failure
void SetupDictionary ( DictRefPtr_c& pDict, const CSphDictSettings& tSettings, const CSphEmbeddedFiles* pFiles, const TokenizerRefPtr_c& pTokenizer, const char* szIndex, bool bStripFile, FilenameBuilder_i* pFilenameBuilder, CSphString& sError )
{
	assert ( pTokenizer );

	pDict->Setup ( tSettings );
	// morphology failure invalidates the whole dictionary
	if ( pDict->SetMorphology ( tSettings.m_sMorphology.cstr(), sError ) == CSphDict::ST_ERROR )
	{
		pDict = nullptr;
		return;
	}

	// stopwords: embedded data wins over on-disk files
	if ( pFiles && pFiles->m_bEmbeddedStopwords )
		pDict->LoadStopwords ( pFiles->m_dStopwords );
	else
	{
		CSphString sStopwordFile = tSettings.m_sStopwords;
		if ( !sStopwordFile.IsEmpty() )
			pDict->LoadStopwords ( sStopwordFile.cstr(), pFilenameBuilder, pTokenizer, bStripFile );
	}

	// resolve wordform paths through the filename builder, if one is provided
	StrVec_t dWordformFiles;
	if ( pFilenameBuilder )
	{
		dWordformFiles.Resize ( tSettings.m_dWordforms.GetLength() );
		ARRAY_FOREACH ( i, tSettings.m_dWordforms )
			dWordformFiles[i] = pFilenameBuilder->GetFullPath ( tSettings.m_dWordforms[i] );
	}

	pDict->LoadWordforms ( pFilenameBuilder ? dWordformFiles : tSettings.m_dWordforms, pFiles && pFiles->m_bEmbeddedWordforms ? pFiles : nullptr, pTokenizer, szIndex );
}
| 2,316
|
C++
|
.cpp
| 62
| 35.435484
| 244
| 0.727232
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,985
|
dict_keywords.cpp
|
manticoresoftware_manticoresearch/src/dict/dict_keywords.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org
//
#include "dict_crc.h"
#include "bin.h"
#include "dict/infix/infix_builder.h"
//////////////////////////////////////////////////////////////////////////
// KEYWORDS STORING DICTIONARY
//////////////////////////////////////////////////////////////////////////
/// binary search for the first hit with wordid greater than or equal to reference
/// returns nullptr when every hit's wordid is below (or the first is above) the reference
static CSphWordHit* FindFirstGte ( CSphWordHit* pHits, int iHits, SphWordID_t uID )
{
	// fast path: the very first hit already matches
	if ( pHits->m_uWordID == uID )
		return pHits;

	CSphWordHit* pLeft = pHits;
	CSphWordHit* pRight = pHits + iHits - 1;

	// reference outside the [first..last] wordid range - no match possible
	if ( pLeft->m_uWordID > uID || pRight->m_uWordID < uID )
		return nullptr;

	// classic bisection; invariant: pLeft->m_uWordID < uID <= pRight->m_uWordID
	while ( pRight - pLeft != 1 )
	{
		CSphWordHit* pMid = pLeft + ( pRight - pLeft ) / 2;
		if ( pMid->m_uWordID < uID )
			pLeft = pMid;
		else
			pRight = pMid;
	}

	assert ( pRight - pLeft == 1 );
	assert ( pLeft->m_uWordID < uID );
	assert ( pRight->m_uWordID >= uID );
	return pRight;
}
//////////////////////////////////////////////////////////////////////////
/// transient state used while emitting the final sorted dictionary
/// (checkpoints, keyword delta-encoding, optional infix index)
struct KeywordDictFinalization_t : public ISphNonCopyMovable
{
	std::unique_ptr<ISphInfixBuilder> m_pInfixer;	// optional infix index builder
	int m_iWords = 0;								// dict entries written so far (drives checkpoint emission)
	CSphKeywordDeltaWriter m_tLastKeyword;			// delta-encodes each keyword against the previous one
};
/// crc32 dictionary that additionally stores original keyword text,
/// which lets it resolve crc collisions and emit the final dict file
/// in sorted keyword order
class CSphDictKeywords final: public CSphDictCRC<CRCALGO::CRC32>
{
	using BASE = CSphDictCRC<CRCALGO::CRC32>;

private:
	// OPTIMIZE? change pointers to 8:24 locators to save RAM on x64 gear?
	struct HitblockKeyword_t
	{
		SphWordID_t m_uWordid; // locally unique word id (crc value, adjusted in case of collision)
		HitblockKeyword_t* m_pNextHash; // next hashed entry
		char* m_pKeyword; // keyword
	};

	// records a crc collision: hash entry plus the crc it originally hashed to
	struct HitblockException_t
	{
		HitblockKeyword_t* m_pEntry; // hash entry
		SphWordID_t m_uCRC; // original unadjusted crc

		bool operator<( const HitblockException_t& rhs ) const
		{
			return m_pEntry->m_uWordid < rhs.m_pEntry->m_uWordid;
		}
	};

	// one dict entry as buffered in memory / spilled to the temp sort file
	struct DictKeyword_t
	{
		char* m_sKeyword;
		SphOffset_t m_uOff;
		int m_iDocs;
		int m_iHits;
		BYTE m_uHint;
		BYTE m_uKeywordLen;
		int64_t m_iSkiplistPos; ///< position in .spe file
	};

	// on-disk location of one sorted entry block in the temp dict file
	struct DictBlock_t
	{
		SphOffset_t m_iPos;
		int m_iLen;
	};

public:
	explicit CSphDictKeywords();

	void HitblockBegin() final { m_bHitblock = true; }
	void HitblockPatch ( CSphWordHit* pHits, int iHits ) const final;
	const char* HitblockGetKeyword ( SphWordID_t uWordID ) final;
	int HitblockGetMemUse() final { return m_iMemUse; }
	void HitblockReset() final;

	void DictBegin ( CSphAutofile& tTempDict, CSphAutofile& tDict, int iDictLimit ) final;
	void SortedDictBegin ( CSphAutofile& tDict, int iDictLimit, int iInfixCodepointBytes ) final;
	void DictEntry ( const DictEntry_t& tEntry ) final;
	void DictEndEntries ( SphOffset_t ) final {};
	bool DictEnd ( DictHeader_t* pHeader, int iMemLimit, CSphString& sError ) final;

	SphWordID_t GetWordID ( BYTE* pWord ) final;
	SphWordID_t GetWordIDWithMarkers ( BYTE* pWord ) final;
	SphWordID_t GetWordIDNonStemmed ( BYTE* pWord ) final;
	SphWordID_t GetWordID ( const BYTE* pWord, int iLen, bool bFilterStops ) final;
	DictRefPtr_c Clone() const final { return CloneBase ( new CSphDictKeywords() ); }

	/// full crc and keyword check
	inline static bool FullIsLess ( const HitblockException_t& a, const HitblockException_t& b ) noexcept
	{
		if ( a.m_uCRC != b.m_uCRC )
			return a.m_uCRC < b.m_uCRC;
		return strcmp ( a.m_pEntry->m_pKeyword, b.m_pEntry->m_pKeyword ) < 0;
	}

protected:
	~CSphDictKeywords() final;

private:
	// dict entry plus the temp-file bin it was read from (used by the merge-sort in DictEnd)
	struct DictKeywordTagged_t: public DictKeyword_t
	{
		int m_iBlock;

		static inline bool IsLess ( const DictKeywordTagged_t& a, const DictKeywordTagged_t& b );
	};

	static const int SLOTS = 65536;
	static const int ENTRY_CHUNK = 65536;
	static const int KEYWORD_CHUNK = 1048576;
	static const int DICT_CHUNK = 65536;

	HitblockKeyword_t* m_dHash[SLOTS]; ///< hash by wordid (!)
	CSphVector<HitblockException_t> m_dExceptions;

	bool m_bHitblock; ///< should we store words on GetWordID or not
	int m_iMemUse; ///< current memory use by all the chunks
	int m_iDictLimit; ///< allowed memory limit for dict block collection

	CSphVector<HitblockKeyword_t*> m_dEntryChunks; ///< hash chunks, only used when indexing hitblocks
	HitblockKeyword_t* m_pEntryChunk;
	int m_iEntryChunkFree;

	CSphVector<BYTE*> m_dKeywordChunks; ///< keyword storage
	BYTE* m_pKeywordChunk;
	int m_iKeywordChunkFree;

	CSphVector<DictKeyword_t*> m_dDictChunks; ///< dict entry chunks, only used when sorting final dict
	DictKeyword_t* m_pDictChunk;
	int m_iDictChunkFree;

	int m_iTmpFD; ///< temp dict file descriptor
	CSphWriter m_wrTmpDict; ///< temp dict writer
	CSphVector<DictBlock_t> m_dDictBlocks; ///< on-disk locations of dict entry blocks

	std::array<char, MAX_KEYWORD_BYTES> m_sClippedWord; ///< keyword storage for clipped word

	std::unique_ptr<KeywordDictFinalization_t> m_pFinalizer;

private:
	SphWordID_t HitblockGetID ( const char* pWord, int iLen, SphWordID_t uCRC );
	HitblockKeyword_t* HitblockAddKeyword ( DWORD uHash, const char* pWord, int iLen, SphWordID_t uID );
	inline bool IsSorted() const noexcept { return m_iDictLimit==0; }
	void DictReadEntry ( CSphBin& dBin, DictKeywordTagged_t& tEntry, BYTE* pKeyword );
	void DictFlush();
	void DictEntryNonSorted ( const DictEntry_t& tEntry );
	bool SortedDictEnd ( DictHeader_t* pHeader, CSphString& sError );
};
//////////////////////////////////////////////////////////////////////////
// start with no chunks allocated and an empty wordid hash
CSphDictKeywords::CSphDictKeywords()
	: m_bHitblock ( false )
	, m_iMemUse ( 0 )
	, m_iDictLimit ( 0 )
	, m_pEntryChunk ( nullptr )
	, m_iEntryChunkFree ( 0 )
	, m_pKeywordChunk ( nullptr )
	, m_iKeywordChunkFree ( 0 )
	, m_pDictChunk ( nullptr )
	, m_iDictChunkFree ( 0 )
	, m_iTmpFD ( -1 )
{
	memset ( m_dHash, 0, sizeof ( m_dHash ) );
}
CSphDictKeywords::~CSphDictKeywords()
{
	// releases all entry/keyword chunks and clears the hash
	HitblockReset();
}
void CSphDictKeywords::HitblockReset()
{
m_dExceptions.Resize ( 0 );
ARRAY_FOREACH ( i, m_dEntryChunks )
SafeDeleteArray ( m_dEntryChunks[i] );
m_dEntryChunks.Resize ( 0 );
m_pEntryChunk = NULL;
m_iEntryChunkFree = 0;
ARRAY_FOREACH ( i, m_dKeywordChunks )
SafeDeleteArray ( m_dKeywordChunks[i] );
m_dKeywordChunks.Resize ( 0 );
m_pKeywordChunk = NULL;
m_iKeywordChunkFree = 0;
m_iMemUse = 0;
memset ( m_dHash, 0, sizeof ( m_dHash ) );
}
/// allocate a new hash entry for (keyword, wordid) out of the chunked pools
/// and push it to the front of its hash bucket (move-to-front)
CSphDictKeywords::HitblockKeyword_t* CSphDictKeywords::HitblockAddKeyword ( DWORD uHash, const char* sWord, int iLen, SphWordID_t uID )
{
	assert ( iLen < MAX_KEYWORD_BYTES );

	// alloc entry
	if ( !m_iEntryChunkFree )
	{
		m_pEntryChunk = new HitblockKeyword_t[ENTRY_CHUNK];
		m_iEntryChunkFree = ENTRY_CHUNK;
		m_dEntryChunks.Add ( m_pEntryChunk );
		m_iMemUse += sizeof ( HitblockKeyword_t ) * ENTRY_CHUNK;
	}
	HitblockKeyword_t* pEntry = m_pEntryChunk++;
	m_iEntryChunkFree--;

	// alloc keyword
	iLen++; // reserve room for the terminating NUL
	if ( m_iKeywordChunkFree < iLen )
	{
		m_pKeywordChunk = new BYTE[KEYWORD_CHUNK];
		m_iKeywordChunkFree = KEYWORD_CHUNK;
		m_dKeywordChunks.Add ( m_pKeywordChunk );
		m_iMemUse += KEYWORD_CHUNK;
	}

	// fill it
	// NOTE(review): this copies iLen bytes, i.e. one byte past the keyword proper; the copy
	// is NUL-terminated right after, but the source read assumes sWord is NUL-terminated - confirm
	memcpy ( m_pKeywordChunk, sWord, iLen );
	m_pKeywordChunk[iLen - 1] = '\0';
	pEntry->m_pKeyword = (char*)m_pKeywordChunk;
	pEntry->m_uWordid = uID;
	m_pKeywordChunk += iLen;
	m_iKeywordChunkFree -= iLen;

	// mtf it
	pEntry->m_pNextHash = m_dHash[uHash];
	m_dHash[uHash] = pEntry;

	return pEntry;
}
/// map (keyword, crc) to a locally unique wordid, resolving crc collisions
/// by assigning adjusted wordids and recording them in m_dExceptions
SphWordID_t CSphDictKeywords::HitblockGetID ( const char* sWord, int iLen, SphWordID_t uCRC )
{
	// overly long keywords (e.g. zones) get clipped and re-hashed
	if ( iLen > MAX_KEYWORD_BYTES - 4 ) // fix of very long word (zones)
	{
		memcpy ( m_sClippedWord.data(), sWord, MAX_KEYWORD_BYTES - 4 );
		memset ( m_sClippedWord.data() + MAX_KEYWORD_BYTES - 4, 0, 4 );

		CSphString sOrig;
		sOrig.SetBinary ( sWord, iLen );
		sphWarn ( "word overrun buffer, clipped!!!\n"
				  "clipped (len=%d, word='%s')\noriginal (len=%d, word='%s')",
			MAX_KEYWORD_BYTES - 4,
			m_sClippedWord.data(),
			iLen,
			sOrig.cstr() );

		sWord = m_sClippedWord.data();
		iLen = MAX_KEYWORD_BYTES - 4;
		uCRC = sphCRC32 ( m_sClippedWord.data(), MAX_KEYWORD_BYTES - 4 );
	}

	// is this a known one? find it
	// OPTIMIZE? in theory, we could use something faster than crc32; but quick lookup3 test did not show any improvements
	const DWORD uHash = (DWORD)( uCRC % SLOTS );

	HitblockKeyword_t* pEntry = m_dHash[uHash];
	HitblockKeyword_t** ppEntry = &m_dHash[uHash];
	while ( pEntry )
	{
		// check crc
		if ( pEntry->m_uWordid != uCRC )
		{
			// crc mismatch, try next entry
			ppEntry = &pEntry->m_pNextHash;
			pEntry = pEntry->m_pNextHash;
			continue;
		}

		// crc matches, check keyword
		int iWordLen = iLen;
		const char* a = pEntry->m_pKeyword;
		const char* b = sWord;
		while ( *a == *b && iWordLen-- )
		{
			if ( !*a || !iWordLen )
			{
				// known word, mtf it, and return id
				( *ppEntry ) = pEntry->m_pNextHash;
				pEntry->m_pNextHash = m_dHash[uHash];
				m_dHash[uHash] = pEntry;
				return pEntry->m_uWordid;
			}
			++a;
			++b;
		}

		// collision detected!
		// our crc is taken as a wordid, but keyword does not match
		// welcome to the land of very tricky magic
		//
		// pEntry might either be a known exception, or a regular keyword
		// sWord might either be a known exception, or a new one
		// if they are not known, they needed to be added as exceptions now
		//
		// in case sWord is new, we need to assign a new unique wordid
		// for that, we keep incrementing the crc until it is unique
		// a starting point for wordid search loop would be handy
		//
		// let's scan the exceptions vector and work on all this
		//
		// NOTE, beware of the order, it is wordid asc, which does NOT guarantee crc asc
		// example, assume crc(w1)==X, crc(w2)==X+1, crc(w3)==X (collides with w1)
		// wordids will be X, X+1, X+2 but crcs will be X, X+1, X
		//
		// OPTIMIZE, might make sense to use binary search
		// OPTIMIZE, add early out somehow
		SphWordID_t uWordid = uCRC + 1;
		const int iExcLen = m_dExceptions.GetLength();
		int iExc = m_dExceptions.GetLength();
		ARRAY_FOREACH ( i, m_dExceptions )
		{
			const HitblockKeyword_t* pExcWord = m_dExceptions[i].m_pEntry;

			// incoming word is a known exception? just return the pre-assigned wordid
			if ( m_dExceptions[i].m_uCRC == uCRC && strncmp ( pExcWord->m_pKeyword, sWord, iLen ) == 0 )
				return pExcWord->m_uWordid;

			// incoming word collided into a known exception? clear the matched entry; no need to re-add it (see below)
			if ( pExcWord == pEntry )
				pEntry = nullptr;

			// find first exception with wordid greater or equal to our candidate
			if ( pExcWord->m_uWordid >= uWordid && iExc == iExcLen )
				iExc = i;
		}

		// okay, this is a new collision
		// if entry was a regular word, we have to add it
		if ( pEntry )
		{
			m_dExceptions.Add();
			m_dExceptions.Last().m_pEntry = pEntry;
			m_dExceptions.Last().m_uCRC = uCRC;
		}

		// need to assign a new unique wordid now
		// keep scanning both exceptions and keywords for collisions
		while ( true )
		{
			// iExc must be either the first exception greater or equal to current candidate, or out of bounds
			assert ( iExc == iExcLen || m_dExceptions[iExc].m_pEntry->m_uWordid >= uWordid );
			assert ( iExc == 0 || m_dExceptions[iExc - 1].m_pEntry->m_uWordid < uWordid );

			// candidate collides with a known exception? increment it, and keep looking
			if ( iExc < iExcLen && m_dExceptions[iExc].m_pEntry->m_uWordid == uWordid )
			{
				++uWordid;
				while ( iExc < iExcLen && m_dExceptions[iExc].m_pEntry->m_uWordid < uWordid )
					++iExc;
				continue;
			}

			// candidate collides with a keyword? must be a regular one; add it as an exception, and keep looking
			HitblockKeyword_t* pCheck = m_dHash[(DWORD)( uWordid % SLOTS )];
			while ( pCheck )
			{
				if ( pCheck->m_uWordid == uWordid )
					break;
				pCheck = pCheck->m_pNextHash;
			}

			// no collisions; we've found our unique wordid!
			if ( !pCheck )
				break;

			// got a collision; add it
			HitblockException_t& tColl = m_dExceptions.Add();
			tColl.m_pEntry = pCheck;
			tColl.m_uCRC = pCheck->m_uWordid; // not a known exception; hence, wordid must equal crc

			// and keep looking
			++uWordid;
			continue;
		}

		// and finally, we have that precious new wordid
		// so hash our new unique under its new unique adjusted wordid
		pEntry = HitblockAddKeyword ( (DWORD)( uWordid % SLOTS ), sWord, iLen, uWordid );

		// add it as a collision too
		m_dExceptions.Add();
		m_dExceptions.Last().m_pEntry = pEntry;
		m_dExceptions.Last().m_uCRC = uCRC;

		// keep exceptions list sorted by wordid
		m_dExceptions.Sort();

		return pEntry->m_uWordid;
	}

	// new keyword with unique crc
	pEntry = HitblockAddKeyword ( uHash, sWord, iLen, uCRC );
	return pEntry->m_uWordid;
}
/// keyword-order comparator used by the DictEnd() merge queue
inline bool CSphDictKeywords::DictKeywordTagged_t::IsLess ( const DictKeywordTagged_t& a, const DictKeywordTagged_t& b )
{
	return strcmp ( a.m_sKeyword, b.m_sKeyword ) < 0;
} // (stray semicolon after the function body removed)
/// read one serialized dict entry back from a temp-file bin;
/// on read failure the bin's error flag is set and tEntry is left untouched
void CSphDictKeywords::DictReadEntry ( CSphBin& dBin, DictKeywordTagged_t& tEntry, BYTE* pKeyword )
{
	// entry layout matches what DictFlush() wrote: len byte, keyword bytes, then zipped fields
	int iKeywordLen = dBin.ReadByte();
	if ( iKeywordLen < 0 )
	{
		// early eof or read error; flag must be raised
		assert ( dBin.IsError() );
		return;
	}

	assert ( iKeywordLen > 0 && iKeywordLen < MAX_KEYWORD_BYTES - 1 );
	if ( dBin.ReadBytes ( pKeyword, iKeywordLen ) != BIN_READ_OK )
	{
		assert ( dBin.IsError() );
		return;
	}
	pKeyword[iKeywordLen] = '\0';

	assert ( m_iSkiplistBlockSize > 0 );

	tEntry.m_sKeyword = (char*)pKeyword;
	tEntry.m_uKeywordLen = iKeywordLen;
	tEntry.m_uOff = dBin.UnzipOffset();
	tEntry.m_iDocs = dBin.UnzipInt();
	tEntry.m_iHits = dBin.UnzipInt();
	tEntry.m_uHint = (BYTE)dBin.ReadByte();
	// skiplist position is only stored for words large enough to have a skiplist
	if ( tEntry.m_iDocs > m_iSkiplistBlockSize )
		tEntry.m_iSkiplistPos = dBin.UnzipOffset();
	else
		tEntry.m_iSkiplistPos = 0;
}
/// begin unsorted dict collection: entries go to a temp file first,
/// then DictEnd() merge-sorts them into the final dict file
void CSphDictKeywords::DictBegin ( CSphAutofile& tTempDict, CSphAutofile& tDict, int iDictLimit )
{
	m_iTmpFD = tTempDict.GetFD();
	// can't use less than 1 chunk
	m_iDictLimit = Max ( iDictLimit, KEYWORD_CHUNK + DICT_CHUNK * (int)sizeof ( DictKeyword_t ) );

	m_wrTmpDict.CloseFile();
	m_wrTmpDict.SetBufferSize(m_iDictLimit);
	m_wrTmpDict.SetFile ( tTempDict, nullptr, m_sWriterError );

	m_wrDict.CloseFile();
	m_wrDict.SetFile ( tDict, nullptr, m_sWriterError );
	// leading format byte
	m_wrDict.PutByte ( 1 );
}
/// begin pre-sorted dict collection: entries are written straight to the final
/// dict file as they arrive (caller guarantees sorted keyword order)
void CSphDictKeywords::SortedDictBegin ( CSphAutofile& tDict, int iDictLimit, int iInfixCodepointBytes )
{
	// no temp file needed in the sorted path
	m_iTmpFD = -1;
	m_wrDict.CloseFile();
	m_wrDict.SetFile ( tDict, nullptr, m_sWriterError );
	// leading format byte
	m_wrDict.PutByte ( 1 );
	assert ( m_wrDict.GetPos() == 1 );
	m_iDictLimit = 0; // 0 assumes we have sorted

	m_pFinalizer = std::make_unique<KeywordDictFinalization_t>();
	CSphString sError;
	m_pFinalizer->m_pInfixer = sphCreateInfixBuilder ( iInfixCodepointBytes, &sError );
	assert ( sError.IsEmpty() );
}
/// finalize the dictionary; in the unsorted path this merge-sorts all temp-file
/// entry blocks into the final dict file, emitting checkpoints and infixes
bool CSphDictKeywords::DictEnd ( DictHeader_t* pHeader, int iMemLimit, CSphString& sError )
{
	// sorted path: everything was already written in order, just finish up
	if ( IsSorted() )
		return SortedDictEnd ( pHeader, sError );

	assert ( !IsSorted() );
	// flush any remaining in-memory entries to the temp file
	DictFlush();
	m_wrTmpDict.CloseFile(); // tricky: file is not owned, so it won't get closed, and iTmpFD won't get invalidated
	if ( m_dDictBlocks.IsEmpty() )
		m_wrDict.CloseFile();

	if ( m_wrTmpDict.IsError() || m_wrDict.IsError() )
	{
		sError.SetSprintf ( "dictionary write error (out of space?)" );
		return false;
	}

	// empty dictionary: just record an empty checkpoint list
	if ( m_dDictBlocks.IsEmpty() )
	{
		pHeader->m_iDictCheckpointsOffset = m_wrDict.GetPos();
		pHeader->m_iDictCheckpoints = 0;
		return true;
	}

	m_pFinalizer = std::make_unique<KeywordDictFinalization_t>();

	// infix builder, if needed
	m_pFinalizer->m_pInfixer = sphCreateInfixBuilder ( pHeader->m_iInfixCodepointBytes, &sError );
	if ( !sError.IsEmpty() )
		return false;

	assert ( m_iSkiplistBlockSize > 0 );

	// initialize readers; one bin per temp-file entry block
	RawVector_T<CSphBin> dBins;
	dBins.Reserve_static ( m_dDictBlocks.GetLength() );

	int iMaxBlock = 0;
	ARRAY_FOREACH ( i, m_dDictBlocks )
		iMaxBlock = Max ( iMaxBlock, m_dDictBlocks[i].m_iLen );

	iMemLimit = Max ( iMemLimit, iMaxBlock * m_dDictBlocks.GetLength() );
	int iBinSize = CSphBin::CalcBinSize ( iMemLimit, m_dDictBlocks.GetLength(), "sort_dict" );

	SphOffset_t iSharedOffset = -1;
	ARRAY_FOREACH ( i, m_dDictBlocks )
	{
		auto& dBin = dBins.Add();
		dBin.m_iFileLeft = m_dDictBlocks[i].m_iLen;
		dBin.m_iFilePos = m_dDictBlocks[i].m_iPos;
		dBin.Init ( m_iTmpFD, &iSharedOffset, iBinSize );
	}

	// keywords storage; one fixed-size slot per bin
	CSphFixedVector<BYTE> dKeywords { MAX_KEYWORD_BYTES * dBins.GetLength() };
	BYTE* pKeywords = dKeywords.begin();

	// do the sort: k-way merge driven by a priority queue over the bins
	CSphQueue<DictKeywordTagged_t, DictKeywordTagged_t> qWords ( dBins.GetLength() );
	DictKeywordTagged_t tEntry;

	ARRAY_FOREACH ( i, dBins )
	{
		DictReadEntry ( dBins[i], tEntry, pKeywords + i * MAX_KEYWORD_BYTES );
		if ( dBins[i].IsError() )
		{
			sError.SetSprintf ( "entry read error in dictionary sort (bin %d of %d)", i, dBins.GetLength() );
			return false;
		}

		tEntry.m_iBlock = i;
		qWords.Push ( tEntry );
	}

	bool bHasMorphology = HasMorphology();
	int iWords = 0;
	while ( qWords.GetLength() )
	{
		const DictKeywordTagged_t& tWord = qWords.Root();
		auto iLen = (const int)tWord.m_uKeywordLen;

		// store checkpoints as needed
		if ( ( iWords % SPH_WORDLIST_CHECKPOINT ) == 0 )
		{
			// emit a checkpoint, unless we're at the very dict beginning
			if ( iWords )
			{
				m_wrDict.ZipInt ( 0 );
				m_wrDict.ZipInt ( 0 );
			}

			BYTE* szClone = new BYTE[iLen + 1]; // OPTIMIZE? pool these?
			memcpy ( szClone, tWord.m_sKeyword, iLen );
			szClone[iLen] = '\0';

			CSphWordlistCheckpoint& tCheckpoint = m_dCheckpoints.Add();
			tCheckpoint.m_szWord = (char*)szClone;
			tCheckpoint.m_iWordlistOffset = m_wrDict.GetPos();

			// checkpoints restart the keyword delta encoding
			m_pFinalizer->m_tLastKeyword.Reset();
		}
		++iWords;

		// write final dict entry
		assert ( iLen );
		assert ( tWord.m_uOff );
		assert ( tWord.m_iDocs );
		assert ( tWord.m_iHits );

		m_pFinalizer->m_tLastKeyword.PutDelta ( m_wrDict, (const BYTE*)tWord.m_sKeyword, iLen );
		m_wrDict.ZipOffset ( tWord.m_uOff );
		m_wrDict.ZipInt ( tWord.m_iDocs );
		m_wrDict.ZipInt ( tWord.m_iHits );
		if ( tWord.m_uHint )
			m_wrDict.PutByte ( tWord.m_uHint );
		if ( tWord.m_iDocs > m_iSkiplistBlockSize )
			m_wrDict.ZipOffset ( tWord.m_iSkiplistPos );

		// build infixes
		if ( m_pFinalizer->m_pInfixer )
			m_pFinalizer->m_pInfixer->AddWord ( (const BYTE*)tWord.m_sKeyword, iLen, m_dCheckpoints.GetLength(), bHasMorphology );

		// next: refill the queue from the bin we just consumed from
		int iBin = tWord.m_iBlock;
		qWords.Pop();

		if ( !dBins[iBin].IsDone() )
		{
			DictReadEntry ( dBins[iBin], tEntry, pKeywords + iBin * MAX_KEYWORD_BYTES );
			if ( dBins[iBin].IsError() )
			{
				sError.SetSprintf ( "entry read error in dictionary sort (bin %d of %d)", iBin, dBins.GetLength() );
				return false;
			}

			tEntry.m_iBlock = iBin;
			qWords.Push ( tEntry );
		}
	}

	return SortedDictEnd ( pHeader, sError );
}
/// write the dict file trailer: end marker, infix entries, checkpoint table,
/// infix blocks, and a debug copy of the header fields
bool CSphDictKeywords::SortedDictEnd ( DictHeader_t * pHeader, CSphString& sError )
{
	assert ( m_pFinalizer );
	// end of dictionary block
	m_wrDict.ZipInt ( 0 );
	m_wrDict.ZipInt ( 0 );

	// flush infix hash entries, if any
	if ( m_pFinalizer->m_pInfixer )
		m_pFinalizer->m_pInfixer->SaveEntries ( m_wrDict );

	// flush wordlist checkpoints (blocks)
	pHeader->m_iDictCheckpointsOffset = m_wrDict.GetPos();
	pHeader->m_iDictCheckpoints = m_dCheckpoints.GetLength();

	ARRAY_FOREACH ( i, m_dCheckpoints )
	{
		auto iLen = (const int)strlen ( m_dCheckpoints[i].m_szWord );

		assert ( m_dCheckpoints[i].m_iWordlistOffset > 0 );
		assert ( iLen > 0 && iLen < MAX_KEYWORD_BYTES );

		m_wrDict.PutDword ( iLen );
		m_wrDict.PutBytes ( m_dCheckpoints[i].m_szWord, iLen );
		m_wrDict.PutOffset ( m_dCheckpoints[i].m_iWordlistOffset );

		// checkpoint keyword copies were heap-allocated in DictEnd()/DictEntry()
		SafeDeleteArray ( m_dCheckpoints[i].m_szWord );
	}

	// flush infix hash blocks
	if ( m_pFinalizer->m_pInfixer )
	{
		pHeader->m_iInfixBlocksOffset = m_pFinalizer->m_pInfixer->SaveEntryBlocks ( m_wrDict );
		pHeader->m_iInfixBlocksWordsSize = m_pFinalizer->m_pInfixer->GetBlocksWordsSize();
		if ( pHeader->m_iInfixBlocksOffset > UINT_MAX ) // FIXME!!! change to int64
			sphDie ( "INTERNAL ERROR: dictionary size " INT64_FMT " overflow at dictend save", pHeader->m_iInfixBlocksOffset );
	}

	// cleanup stuff we no more need
	m_dCheckpoints.Reset();
	m_pFinalizer = nullptr;

	// flush header
	// mostly for debugging convenience
	// primary storage is in the index wide header
	m_wrDict.PutBlob ( g_sTagDictHeader );
	m_wrDict.ZipInt ( pHeader->m_iDictCheckpoints );
	m_wrDict.ZipOffset ( pHeader->m_iDictCheckpointsOffset );
	m_wrDict.ZipInt ( pHeader->m_iInfixCodepointBytes );
	m_wrDict.ZipInt ( (DWORD)pHeader->m_iInfixBlocksOffset );

	// about it
	m_wrDict.CloseFile();
	if ( m_wrDict.IsError() )
		sError.SetSprintf ( "dictionary write error (out of space?)" );
	return !m_wrDict.IsError();
}
/// spill all buffered dict entries to the temp file as one sorted block,
/// then release the in-memory chunks
void CSphDictKeywords::DictFlush()
{
	if ( !m_dDictChunks.GetLength() )
		return;

	assert ( m_dDictChunks.GetLength() && m_dKeywordChunks.GetLength() );
	assert ( m_iSkiplistBlockSize > 0 );

	// sort em; gather pointers to every live entry across all chunks
	int iTotalWords = m_dDictChunks.GetLength() * DICT_CHUNK - m_iDictChunkFree;
	CSphVector<DictKeyword_t*> dWords ( iTotalWords );

	int iIdx = 0;
	ARRAY_FOREACH ( i, m_dDictChunks )
	{
		int iWords = DICT_CHUNK;
		// the last chunk may be partially filled
		if ( i == m_dDictChunks.GetLength() - 1 )
			iWords -= m_iDictChunkFree;

		DictKeyword_t* pWord = m_dDictChunks[i];
		for ( int j = 0; j < iWords; j++ )
			dWords[iIdx++] = pWord++;
	}

	dWords.Sort ( Lesser ( [] ( const CSphDictKeywords::DictKeyword_t* a, const CSphDictKeywords::DictKeyword_t* b ) noexcept {
		return strcmp ( a->m_sKeyword, b->m_sKeyword ) < 0;
	} ) );

	// write em; the serialization format is mirrored by DictReadEntry()
	DictBlock_t& tBlock = m_dDictBlocks.Add();
	tBlock.m_iPos = m_wrTmpDict.GetPos();

	for ( const DictKeyword_t* pWord : dWords )
	{
		auto iLen = (int)strlen ( pWord->m_sKeyword );
		m_wrTmpDict.PutByte ( (BYTE)iLen );
		m_wrTmpDict.PutBytes ( pWord->m_sKeyword, iLen );
		m_wrTmpDict.ZipOffset ( pWord->m_uOff );
		m_wrTmpDict.ZipInt ( pWord->m_iDocs );
		m_wrTmpDict.ZipInt ( pWord->m_iHits );
		m_wrTmpDict.PutByte ( pWord->m_uHint );
		assert ( ( pWord->m_iDocs > m_iSkiplistBlockSize ) == ( pWord->m_iSkiplistPos != 0 ) );
		if ( pWord->m_iDocs > m_iSkiplistBlockSize )
			m_wrTmpDict.ZipOffset ( pWord->m_iSkiplistPos );
	}

	tBlock.m_iLen = (int)( m_wrTmpDict.GetPos() - tBlock.m_iPos );

	// clean up buffers
	m_dDictChunks.for_each ( [] ( auto& dChunk ) { delete[] ( dChunk ); } );
	m_dDictChunks.Resize ( 0 );
	m_pDictChunk = nullptr;
	m_iDictChunkFree = 0;

	m_dKeywordChunks.for_each ( [] ( auto& dChunk ) { delete[] ( dChunk ); } );
	m_dKeywordChunks.Resize ( 0 );
	m_pKeywordChunk = nullptr;
	m_iKeywordChunkFree = 0;
	m_iMemUse = 0;
}
/// add one dict entry; in the sorted path it is written to the final file
/// immediately (with checkpoints/infixes), otherwise it is buffered
void CSphDictKeywords::DictEntry ( const DictEntry_t& tEntry )
{
	assert ( tEntry.m_iDocs );
	assert ( tEntry.m_iHits );
	assert ( tEntry.m_iDoclistOffset );
	assert ( tEntry.m_iDoclistLength > 0 );
	assert ( m_iSkiplistBlockSize > 0 );

	if ( !IsSorted() )
		return DictEntryNonSorted ( tEntry );

	auto iLen = (int)strlen ( (const char*)tEntry.m_szKeyword );

	// store checkpoints as needed
	if ( ( m_pFinalizer->m_iWords % SPH_WORDLIST_CHECKPOINT ) == 0 )
	{
		// emit a checkpoint, unless we're at the very dict beginning
		if ( m_pFinalizer->m_iWords )
		{
			m_wrDict.ZipInt ( 0 );
			m_wrDict.ZipInt ( 0 );
		}

		auto* szClone = new BYTE[iLen + 1]; // OPTIMIZE? pool these?
		memcpy ( szClone, tEntry.m_szKeyword, iLen );
		szClone[iLen] = '\0';

		CSphWordlistCheckpoint& tCheckpoint = m_dCheckpoints.Add();
		tCheckpoint.m_szWord = (const char*)szClone;
		tCheckpoint.m_iWordlistOffset = m_wrDict.GetPos();

		// checkpoints restart the keyword delta encoding
		m_pFinalizer->m_tLastKeyword.Reset();
	}
	++m_pFinalizer->m_iWords;

	// write final dict entry
	assert ( iLen );
	m_pFinalizer->m_tLastKeyword.PutDelta( m_wrDict, (const BYTE*)tEntry.m_szKeyword, iLen );
	m_wrDict.ZipOffset ( tEntry.m_iDoclistOffset );
	m_wrDict.ZipInt ( tEntry.m_iDocs );
	m_wrDict.ZipInt ( tEntry.m_iHits );
	auto uHint = sphDoclistHintPack ( tEntry.m_iDocs, tEntry.m_iDoclistLength );
	if ( uHint )
		m_wrDict.PutByte ( uHint );
	if ( tEntry.m_iDocs > m_iSkiplistBlockSize )
		m_wrDict.ZipOffset ( tEntry.m_iSkiplistOffset );

	// build infixes
	if ( m_pFinalizer->m_pInfixer )
		m_pFinalizer->m_pInfixer->AddWord ( (const BYTE*)tEntry.m_szKeyword, iLen, m_dCheckpoints.GetLength(), HasMorphology() );
}
// non-sorted case - we push all entries into huge temporary file, and then finalize it in DictEnd()
void CSphDictKeywords::DictEntryNonSorted ( const DictEntry_t& tEntry )
{
	assert ( !IsSorted() );
	DictKeyword_t* pWord = NULL;
	auto iLen = (int)strlen ( (const char*)tEntry.m_szKeyword ) + 1;

	// ensure both an entry slot and keyword storage are available;
	// the loop re-checks after a flush triggered by the keyword pool
	while ( true )
	{
		// alloc dict entry
		if ( !m_iDictChunkFree )
		{
			if ( m_iDictLimit && ( m_iMemUse + (int)sizeof ( DictKeyword_t ) * DICT_CHUNK ) > m_iDictLimit )
				DictFlush();

			m_pDictChunk = new DictKeyword_t[DICT_CHUNK];
			m_iDictChunkFree = DICT_CHUNK;
			m_dDictChunks.Add ( m_pDictChunk );
			m_iMemUse += sizeof ( DictKeyword_t ) * DICT_CHUNK;
		}

		// alloc keyword
		if ( m_iKeywordChunkFree < iLen )
		{
			if ( m_iDictLimit && ( m_iMemUse + KEYWORD_CHUNK ) > m_iDictLimit )
			{
				DictFlush();
				continue; // because we just flushed pWord
			}

			m_pKeywordChunk = new BYTE[KEYWORD_CHUNK];
			m_iKeywordChunkFree = KEYWORD_CHUNK;
			m_dKeywordChunks.Add ( m_pKeywordChunk );
			m_iMemUse += KEYWORD_CHUNK;
		}
		// aw kay
		break;
	}

	// grab an entry slot and copy the keyword into pooled storage
	pWord = m_pDictChunk++;
	--m_iDictChunkFree;
	pWord->m_sKeyword = (char*)m_pKeywordChunk;
	memcpy ( m_pKeywordChunk, tEntry.m_szKeyword, iLen );
	m_pKeywordChunk[iLen - 1] = '\0';
	m_pKeywordChunk += iLen;
	m_iKeywordChunkFree -= iLen;

	pWord->m_uOff = tEntry.m_iDoclistOffset;
	pWord->m_iDocs = tEntry.m_iDocs;
	pWord->m_iHits = tEntry.m_iHits;
	pWord->m_uHint = sphDoclistHintPack ( tEntry.m_iDocs, tEntry.m_iDoclistLength );
	pWord->m_iSkiplistPos = 0;
	if ( tEntry.m_iDocs > m_iSkiplistBlockSize )
		pWord->m_iSkiplistPos = tEntry.m_iSkiplistOffset;
}
/// crc the word; when hitblock collection is on, also store the keyword
/// and resolve crc collisions via HitblockGetID()
SphWordID_t CSphDictKeywords::GetWordID ( BYTE* pWord )
{
	const SphWordID_t uId = BASE::GetWordID ( pWord );
	if ( uId && m_bHitblock )
		return HitblockGetID ( (const char*)pWord, (int)strlen ( (const char*)pWord ), uId );
	return uId;
}
/// same as GetWordID(), but for words carrying prefix/suffix markers
SphWordID_t CSphDictKeywords::GetWordIDWithMarkers ( BYTE* pWord )
{
	const SphWordID_t uId = BASE::GetWordIDWithMarkers ( pWord );
	if ( uId && m_bHitblock )
		return HitblockGetID ( (const char*)pWord, (int)strlen ( (const char*)pWord ), uId );
	return uId;
}
/// same as GetWordID(), but bypassing morphology
SphWordID_t CSphDictKeywords::GetWordIDNonStemmed ( BYTE* pWord )
{
	const SphWordID_t uId = BASE::GetWordIDNonStemmed ( pWord );
	if ( uId && m_bHitblock )
		return HitblockGetID ( (const char*)pWord, (int)strlen ( (const char*)pWord ), uId );
	return uId;
}
/// length-delimited variant of GetWordID()
SphWordID_t CSphDictKeywords::GetWordID ( const BYTE* pWord, int iLen, bool bFilterStops )
{
	const SphWordID_t uId = BASE::GetWordID ( pWord, iLen, bFilterStops );
	if ( uId && m_bHitblock )
		return HitblockGetID ( (const char*)pWord, iLen, uId ); // !COMMIT would break, we kind of strcmp inside; but must never get called?
	return uId;
}
/// do hit block patching magic
/// when several distinct keywords collide on the same crc inside a hit block,
/// their hit chunks were emitted in arbitrary "wordid asc" order; this pass
/// rewrites each collision span into strict "crc asc, keyword asc" order
/// @param pHits  hit array for the block (sorted by wordid)
/// @param iHits  number of hits in the array
void CSphDictKeywords::HitblockPatch ( CSphWordHit* pHits, int iHits ) const
{
if ( !pHits || iHits <= 0 )
return;
const CSphVector<HitblockException_t>& dExc = m_dExceptions; // shortcut
// dChunk[i] will point at the first hit of the i-th colliding wordid; the last slot is the end sentinel
CSphVector<CSphWordHit*> dChunk;
// reorder hit chunks for exceptions (aka crc collisions)
for ( int iFirst = 0; iFirst < dExc.GetLength() - 1; )
{
// find next span of collisions, iFirst inclusive, iMax exclusive ie. [iFirst,iMax)
// (note that exceptions array is always sorted)
SphWordID_t uFirstWordid = dExc[iFirst].m_pEntry->m_uWordid;
assert ( dExc[iFirst].m_uCRC == uFirstWordid );
int iMax = iFirst + 1;
SphWordID_t uSpan = uFirstWordid + 1;
while ( iMax < dExc.GetLength() && dExc[iMax].m_pEntry->m_uWordid == uSpan )
{
iMax++;
uSpan++;
}
// check whether they are in proper order already
bool bSorted = true;
for ( int i = iFirst; i < iMax - 1 && bSorted; ++i )
if ( FullIsLess ( dExc[i + 1], dExc[i] ) )
bSorted = false;
// order is ok; skip this span
if ( bSorted )
{
iFirst = iMax;
continue;
}
// we need to fix up these collision hits
// convert them from arbitrary "wordid asc" to strict "crc asc, keyword asc" order
// lets begin with looking up hit chunks for every wordid
dChunk.Resize ( iMax - iFirst + 1 );
// find the end
dChunk.Last() = FindFirstGte ( pHits, iHits, uFirstWordid + iMax - iFirst );
if ( !dChunk.Last() )
{
// no hits past the span => the span runs to the very end of the hit array
assert ( iMax == dExc.GetLength() && pHits[iHits - 1].m_uWordID == uFirstWordid + iMax - 1 - iFirst );
dChunk.Last() = pHits + iHits;
}
// find the start
dChunk[0] = FindFirstGte ( pHits, int ( dChunk.Last() - pHits ), uFirstWordid );
assert ( dChunk[0] && dChunk[0]->m_uWordID == uFirstWordid );
// find the chunk starts
for ( int i = 1; i < dChunk.GetLength() - 1; i++ )
{
dChunk[i] = FindFirstGte ( dChunk[i - 1], int ( dChunk.Last() - dChunk[i - 1] ), uFirstWordid + i );
assert ( dChunk[i] && dChunk[i]->m_uWordID == uFirstWordid + i );
}
CSphWordHit* pTemp;
if ( iMax - iFirst == 2 )
{
// most frequent case, just two collisions
// OPTIMIZE? allocate buffer for the smaller chunk, not just first chunk
// swap the two chunks: stash chunk #0, slide chunk #1 down, append the stash
pTemp = new CSphWordHit[dChunk[1] - dChunk[0]];
memcpy ( pTemp, dChunk[0], ( dChunk[1] - dChunk[0] ) * sizeof ( CSphWordHit ) );
memmove ( dChunk[0], dChunk[1], ( dChunk[2] - dChunk[1] ) * sizeof ( CSphWordHit ) );
memcpy ( dChunk[0] + ( dChunk[2] - dChunk[1] ), pTemp, ( dChunk[1] - dChunk[0] ) * sizeof ( CSphWordHit ) );
} else
{
// generic case, more than two
// sort chunk indices by (crc, keyword), then gather the chunks into a temp buffer in that order
CSphVector<int> dReorder ( iMax - iFirst );
dReorder.FillSeq();
dReorder.Sort ( Lesser ( [pBase = &dExc[iFirst]] ( int a, int b ) noexcept { return FullIsLess ( pBase[a], pBase[b] ); } ) );
// OPTIMIZE? could skip heading and trailing blocks that are already in position
pTemp = new CSphWordHit[dChunk.Last() - dChunk[0]];
CSphWordHit* pOut = pTemp;
ARRAY_FOREACH ( i, dReorder )
{
int iChunk = dReorder[i];
int iChunkHits = int ( dChunk[iChunk + 1] - dChunk[iChunk] );
memcpy ( pOut, dChunk[iChunk], iChunkHits * sizeof ( CSphWordHit ) );
pOut += iChunkHits;
}
assert ( ( pOut - pTemp ) == ( dChunk.Last() - dChunk[0] ) );
// copy the reordered hits back over the original span
memcpy ( dChunk[0], pTemp, ( dChunk.Last() - dChunk[0] ) * sizeof ( CSphWordHit ) );
}
// patching done
SafeDeleteArray ( pTemp );
iFirst = iMax;
}
}
/// resolve a (possibly remapped) wordid back to its keyword string;
/// checks the hash chain first, then falls back to the crc-collision exceptions
const char* CSphDictKeywords::HitblockGetKeyword ( SphWordID_t uWordID )
{
	const DWORD uHash = (DWORD)( uWordID % SLOTS );

	// walk the hash chain
	for ( HitblockKeyword_t* pEntry = m_dHash[uHash]; pEntry; pEntry = pEntry->m_pNextHash )
		if ( pEntry->m_uWordid == uWordID )
			return pEntry->m_pKeyword;

	// not hashed; must be among the collision exceptions then
	assert ( m_dExceptions.GetLength() );
	for ( int i = 0; i < m_dExceptions.GetLength(); ++i )
		if ( m_dExceptions[i].m_pEntry->m_uWordid == uWordID )
			return m_dExceptions[i].m_pEntry->m_pKeyword;

	// should never happen; report loudly instead of crashing
	sphWarning ( "hash missing value in operator [] (wordid=" INT64_FMT ", hash=%u)", (int64_t)uWordID, uHash );
	assert ( 0 && "hash missing value in operator []" );
	return "\31oops";
}
///////////////////////////////////////////////////////////////////////
/// factory: create a dict=keywords dictionary and run the common setup
/// (morphology, stopwords, wordforms); setup failures are reported via sError
DictRefPtr_c sphCreateDictionaryKeywords ( const CSphDictSettings& tSettings, const CSphEmbeddedFiles* pFiles, const TokenizerRefPtr_c& pTokenizer, const char* szIndex, bool bStripFile, int iSkiplistBlockSize, FilenameBuilder_i* pFilenameBuilder, CSphString& sError )
{
DictRefPtr_c pDict { new CSphDictKeywords() };
SetupDictionary ( pDict, tSettings, pFiles, pTokenizer, szIndex, bStripFile, pFilenameBuilder, sError );
// might be empty due to wrong morphology setup
if ( pDict )
pDict->SetSkiplistBlockSize ( iSkiplistBlockSize );
return pDict;
}
| 32,088
|
C++
|
.cpp
| 867
| 34.241061
| 267
| 0.692191
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,986
|
infix_builder.cpp
|
manticoresoftware_manticoresearch/src/dict/infix/infix_builder.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org
//
#include "infix_builder.h"
#include "sphinxint.h"
#include "std/crc32.h"
#include <array>
//////////////////////////////////////////////////////////////////////////
// KEYWORDS STORING DICTIONARY, INFIX HASH BUILDER
//////////////////////////////////////////////////////////////////////////
static constexpr int INFIX_ARENA_LENGTH = 1048576;
// fixed-size infix key: SIZE dwords worth of raw keyword bytes, zero-padded
template<int SIZE>
struct Infix_t
{
std::array<BYTE, SIZE * sizeof ( DWORD )> m_Data;
#ifndef NDEBUG
// debug-only trailing byte; presumably a z-terminator so m_Data can be
// printed as a C string in a debugger - TODO confirm
BYTE m_TrailingZero = 0;
#endif
// wipe the key; shorter infixes rely on this zero padding for equality
void Reset()
{
m_Data.fill ( 0 );
}
// byte-wise comparison; valid because unused tail bytes are always zeroed
bool operator== ( const Infix_t<SIZE>& rhs ) const noexcept
{
return m_Data == rhs.m_Data;
}
};
// compact vector of checkpoint ids with a small-size optimization:
// up to 4 values live inline in m_dData, with the inline count packed into
// the top byte of m_dData[0] (so inline values are limited to 24 bits and
// are masked on read); bit 31 of the first dword flags heap-allocated mode,
// where the overlaid struct (m_iDynLen/m_iDynLimit/m_pDynData) takes over.
// NOTE(review): no copy/move members despite owning m_pDynData; instances
// appear to live only inside CSphSwapVector arenas - confirm before copying
class InfixIntvec_c
{
private:
// do not change the order of fields in this union - it matters a lot
union {
std::array<DWORD, 4> m_dData;
struct
{
int m_iDynLen;
int m_iDynLimit;
DWORD* m_pDynData;
};
};
// bit 31 of the first dword doubles as the "heap mode" flag in both layouts
bool IsDynamic() const noexcept
{
return ( m_dData[0] & 0x80000000UL ) != 0;
}
public:
InfixIntvec_c()
{
m_dData.fill(0);
}
~InfixIntvec_c()
{
if ( IsDynamic() )
SafeDeleteArray ( m_pDynData );
}
// append a checkpoint id; consecutive duplicates are collapsed
void Add ( DWORD uVal )
{
if ( !m_dData[0] )
{
// empty
m_dData[0] = uVal | 0x01000000UL;
return;
}
if ( !IsDynamic() )
{
// 1..4 static entries
int iLen = m_dData[0] >> 24;
DWORD uLast = m_dData[iLen - 1] & 0x00ffffffUL;
// redundant
if ( uVal == uLast )
return;
// grow static part
if ( iLen < 4 )
{
m_dData[iLen] = uVal;
m_dData[0] = ( m_dData[0] & 0x00ffffffUL ) | ( ++iLen << 24 );
return;
}
// dynamize
// spill the 4 inline values to the heap, stripping the packed count
DWORD* pDyn = new DWORD[16];
memcpy ( pDyn, m_dData.data(), 4 * sizeof ( DWORD ) );
pDyn[0] &= 0x00ffffffUL;
pDyn[4] = uVal;
m_iDynLen = 0x80000005UL; // dynamic flag, len=5
m_iDynLimit = 16; // limit=16
m_pDynData = pDyn;
return;
}
// N dynamic entries
int iLen = m_iDynLen & 0x00ffffffUL;
if ( uVal == m_pDynData[iLen - 1] )
return;
if ( iLen >= m_iDynLimit )
{
// double the heap buffer
m_iDynLimit *= 2;
auto* pNew = new DWORD[m_iDynLimit];
memcpy ( pNew, m_pDynData, iLen * sizeof ( DWORD ) );
delete[] ( std::exchange ( m_pDynData, pNew ) );
}
m_pDynData[iLen] = uVal;
++m_iDynLen;
}
int GetLength() const noexcept
{
if ( !IsDynamic() )
return m_dData[0] >> 24;
return m_iDynLen & 0x00ffffffUL;
}
// read i-th value; inline values are masked to drop the packed count byte
DWORD operator[] ( int iIndex ) const noexcept
{
if ( !IsDynamic() )
return m_dData[iIndex] & 0x00ffffffUL;
return m_pDynData[iIndex];
}
};
// one slot of the infix hash arena; chains are linked through m_iNext,
// with 0 terminating the chain (arena entry 0 is reserved as a sentinel)
template<int SIZE>
struct InfixHashEntry_t
{
Infix_t<SIZE> m_tKey; ///< key, owned by the hash
InfixIntvec_c m_tValue; ///< data, owned by the hash
int m_iNext; ///< next entry in hash arena
};
// infix hash builder: a fixed-slot (INFIX_ARENA_LENGTH) chained hash whose
// entries live in a single arena vector; maps infix keys to checkpoint lists
template<int SIZE>
class InfixBuilder_c final: public ISphInfixBuilder
{
std::array<int, INFIX_ARENA_LENGTH> m_dHash; ///< all the hash entries
CSphSwapVector<InfixHashEntry_t<SIZE>> m_dArena;
CSphVector<InfixBlock_t> m_dBlocks;
CSphTightVector<BYTE> m_dBlocksWords;
private:
// insert a fresh (key -> {iCheckpoint}) entry at the head of its hash chain
void AddEntry ( const Infix_t<SIZE>& tKey, DWORD uHash, int iCheckpoint )
{
uHash &= ( INFIX_ARENA_LENGTH - 1 );
int iEntry = m_dArena.GetLength();
InfixHashEntry_t<SIZE>& tNew = m_dArena.Add();
tNew.m_tKey = tKey;
tNew.m_tValue.Add ( iCheckpoint ); // len=1, data=iCheckpoint
tNew.m_iNext = std::exchange ( m_dHash[uHash], iEntry );
}
/// get value pointer by key
/// returns nullptr when the key is not hashed yet; on a hit, the entry is
/// moved to the front of its chain (mtf) to speed up repeated lookups
InfixIntvec_c* LookupEntry ( const Infix_t<SIZE>& tKey, DWORD uHash )
{
uHash &= ( INFIX_ARENA_LENGTH - 1 );
int iEntry = m_dHash[uHash];
int iiEntry = 0; // previous entry in the chain, 0 when iEntry is the head
while ( iEntry )
{
if ( m_dArena[iEntry].m_tKey == tKey )
{
// mtf it, if needed
if ( iiEntry )
m_dArena[iiEntry].m_iNext = std::exchange ( m_dArena[iEntry].m_iNext, std::exchange ( m_dHash[uHash], iEntry ) );
return &m_dArena[iEntry].m_tValue;
}
iiEntry = std::exchange ( iEntry, m_dArena[iEntry].m_iNext );
}
return nullptr;
}
public:
InfixBuilder_c()
{
// init the hash
for ( auto& uHash : m_dHash )
uHash = 0;
m_dArena.Reserve ( INFIX_ARENA_LENGTH );
m_dArena.Resize ( 1 ); // 0 is a reserved index
}
void AddWord ( const BYTE* pWord, int iWordLength, int iCheckpoint, bool bHasMorphology ) override;
void SaveEntries ( CSphWriter& wrDict ) override;
int64_t SaveEntryBlocks ( CSphWriter& wrDict ) override;
int GetBlocksWordsSize() const override { return m_dBlocksWords.GetLength(); }
};
/// single-byte case, 2-dword infixes
/// registers iCheckpoint under every infix of 2..6 bytes starting at each
/// position of the word; a rolling crc32 lets each longer infix reuse the
/// shorter one's hash
template<>
void InfixBuilder_c<2>::AddWord ( const BYTE* pWord, int iWordLength, int iCheckpoint, bool bHasMorphology )
{
// with morphology enabled, only the pristine (non-stemmed) copies are indexed
if ( bHasMorphology && *pWord != MAGIC_WORD_HEAD_NONSTEMMED )
return;
if ( *pWord < 0x20 ) // skip heading magic chars, like NONSTEMMED maker
{
++pWord;
--iWordLength;
}
Infix_t<2> sKey;
for ( int p = 0; p <= iWordLength - 2; ++p )
{
sKey.Reset();
auto* pKey = sKey.m_Data.data();
const BYTE* s = pWord + p;
const BYTE* sMax = s + Min ( 6, iWordLength - p );
DWORD uHash = CRC32_start ( *s );
*pKey++ = *s++; // copy first infix byte
while ( s < sMax )
{
CRC32_step ( uHash, *s );
*pKey++ = *s++; // copy another infix byte
// record the checkpoint under this infix; create the entry on first sight
InfixIntvec_c * pVal = LookupEntry ( sKey, uHash );
if ( pVal )
pVal->Add ( iCheckpoint );
else
AddEntry ( sKey, uHash, iCheckpoint );
}
}
}
/// UTF-8 case, 3/5-dword infixes
/// same idea as the single-byte specialization, but infix lengths are counted
/// in codepoints (2..6), so a codepoint offset table is built first and all
/// byte copying walks whole UTF-8 sequences
template<int SIZE>
void InfixBuilder_c<SIZE>::AddWord ( const BYTE* pWord, int iWordLength, int iCheckpoint, bool bHasMorphology )
{
// with morphology enabled, only the pristine (non-stemmed) copies are indexed
if ( bHasMorphology && *pWord != MAGIC_WORD_HEAD_NONSTEMMED )
return;
if ( *pWord < 0x20 ) // skip heading magic chars, like NONSTEMMED maker
{
++pWord;
--iWordLength;
}
const BYTE* pWordMax = pWord + iWordLength;
#ifndef NDEBUG
bool bInvalidTailCp = false;
#endif
int iCodes = 0; // codepoints in current word
std::array<BYTE, SPH_MAX_WORD_LEN + 1> dBytes; // byte offset for each codepoints
// build an offsets table into the bytestring
dBytes[0] = 0;
for ( const BYTE* p = pWord; p < pWordMax && iCodes < SPH_MAX_WORD_LEN; )
{
int iLen = sphUtf8CharBytes ( *p );
// break on tail cut codepoint
if ( p + iLen > pWordMax )
{
#ifndef NDEBUG
bInvalidTailCp = true;
#endif
break;
}
// skip word with large codepoints
if ( iLen > SIZE )
return;
assert ( iLen >= 1 && iLen <= 4 );
p += iLen;
dBytes[iCodes + 1] = dBytes[iCodes] + (BYTE)iLen;
++iCodes;
}
assert ( pWord[dBytes[iCodes]] == 0 || iCodes == SPH_MAX_WORD_LEN || bInvalidTailCp );
// generate infixes
Infix_t<SIZE> sKey;
for ( int p = 0; p <= iCodes - 2; ++p )
{
sKey.Reset();
BYTE* pKey = sKey.m_Data.data();
const BYTE* pKeyMax = pKey + sizeof ( sKey.m_Data );
const BYTE* s = pWord + dBytes[p];
const BYTE* sMax = pWord + dBytes[p + Min ( 6, iCodes - p )];
// copy first infix codepoint
DWORD uHash = 0xffffffffUL;
do
{
CRC32_step ( uHash, *s );
*pKey++ = *s++;
} while ( ( *s & 0xC0 ) == 0x80 ); // continuation bytes belong to the same codepoint
assert ( s - ( pWord + dBytes[p] ) == ( dBytes[p + 1] - dBytes[p] ) );
// extend the infix one whole codepoint at a time, as long as it fits the key
while ( s < sMax && pKey < pKeyMax && pKey + sphUtf8CharBytes ( *s ) <= pKeyMax )
{
// copy next infix codepoint
do
{
CRC32_step ( uHash, *s );
*pKey++ = *s++;
} while ( ( *s & 0xC0 ) == 0x80 && pKey < pKeyMax );
assert ( sphUTF8Len ( (const char*)sKey.m_Data.data(), sizeof ( sKey.m_Data ) ) >= 2 );
// record the checkpoint under this infix; create the entry on first sight
InfixIntvec_c* pVal = LookupEntry ( sKey, uHash );
if ( pVal )
pVal->Add ( iCheckpoint );
else
AddEntry ( sKey, uHash, iCheckpoint );
}
assert ( (size_t)( pKey - (BYTE*)sKey.m_Data.data() ) <= int ( sizeof ( sKey.m_Data ) ) );
}
}
/// number of bytes a value occupies in 7-bits-per-byte (varint) encoding
static inline int ZippedIntSize ( DWORD v ) noexcept
{
	int iBytes = 1;
	while ( iBytes < 5 && v >= ( 1UL << ( 7 * iBytes ) ) )
		++iBytes;
	return iBytes;
}
// dump all infix entries to the dictionary file, sorted by key and
// front-coded against the previous key; every INFIX_BLOCK_SIZE entries a new
// block starts (deltas reset, full key stored in m_dBlocksWords for lookup)
template<int SIZE>
void InfixBuilder_c<SIZE>::SaveEntries ( CSphWriter& wrDict )
{
// intentionally local to this function
// we mark the block end with an editcode of 0
const int INFIX_BLOCK_SIZE = 64;
wrDict.PutBlob ( g_sTagInfixEntries );
// sort arena indices 1..N-1 by key (entry 0 is the reserved sentinel)
CSphVector<int> dIndex;
dIndex.Resize ( m_dArena.GetLength() - 1 );
dIndex.FillSeq(1);
dIndex.Sort ( Lesser ( [this] ( int a, int b ) noexcept { return m_dArena[a].m_tKey.m_Data < m_dArena[b].m_tKey.m_Data; } ) );
m_dBlocksWords.Reserve ( m_dArena.GetLength() / INFIX_BLOCK_SIZE * sizeof ( DWORD ) * SIZE );
int iBlock = 0;
int iPrevKey = -1;
constexpr size_t DWSIZE = sizeof ( DWORD ) * SIZE;
ARRAY_FOREACH ( iIndex, dIndex )
{
InfixIntvec_c& dData = m_dArena[dIndex[iIndex]].m_tValue;
const char* sKey = (const char*)m_dArena[dIndex[iIndex]].m_tKey.m_Data.data();
// key length in characters (SBCS) or codepoints (UTF-8)
int iChars = ( SIZE == 2 )
? (int)strnlen ( sKey, DWSIZE )
: sphUTF8Len ( sKey, (int)DWSIZE );
assert ( iChars >= 2 && iChars < int ( 1 + sizeof ( Infix_t<SIZE> ) ) );
// keep track of N-infix blocks
auto iAppendBytes = (int)strnlen ( sKey, DWSIZE );
if ( !iBlock )
{
// block start: remember the full key and its dict file offset
int iOff = m_dBlocksWords.GetLength();
m_dBlocksWords.Resize ( iOff + iAppendBytes + 1 );
InfixBlock_t& tBlock = m_dBlocks.Add();
tBlock.m_iInfixOffset = iOff;
tBlock.m_iOffset = (DWORD)wrDict.GetPos();
memcpy ( m_dBlocksWords.Begin() + iOff, sKey, iAppendBytes );
m_dBlocksWords[iOff + iAppendBytes] = '\0';
}
// compute max common prefix
// edit_code = ( num_keep_chars<<4 ) + num_append_chars
int iEditCode = iChars;
if ( iPrevKey >= 0 )
{
const char* sPrev = (const char*)m_dArena[dIndex[iPrevKey]].m_tKey.m_Data.data();
const char* sCur = sKey;
const char* sMax = sCur + iAppendBytes;
int iKeepChars = 0;
if constexpr ( SIZE == 2 )
{
// SBCS path
while ( sCur < sMax && *sCur && *sCur == *sPrev )
{
++sCur;
++sPrev;
}
iKeepChars = (int)( sCur - sKey );
assert ( iKeepChars >= 0 && iKeepChars < 16 );
assert ( iChars - iKeepChars >= 0 );
assert ( iChars - iKeepChars < 16 );
iEditCode = ( iKeepChars << 4 ) + ( iChars - iKeepChars );
iAppendBytes = ( iChars - iKeepChars );
sKey = sCur;
}
else
{
// UTF-8 path
const char* sKeyMax = sCur; // track max matching sPrev prefix in [sKey,sKeyMax)
while ( sCur < sMax && *sCur && *sCur == *sPrev )
{
// current byte matches, move the pointer
++sCur;
++sPrev;
// tricky bit
// if the next (!) byte is a valid UTF-8 char start (or eof!)
// then we just matched not just a byte, but a full char
// so bump the matching prefix boundary and length
if ( sCur >= sMax || ( *sCur & 0xC0 ) != 0x80 )
{
sKeyMax = sCur;
++iKeepChars;
}
}
assert ( iKeepChars >= 0 && iKeepChars < 16 );
assert ( iChars - iKeepChars >= 0 );
assert ( iChars - iKeepChars < 16 );
iEditCode = ( iKeepChars << 4 ) + ( iChars - iKeepChars );
iAppendBytes -= (int)( sKeyMax - sKey );
sKey = sKeyMax;
}
}
// write edit code, postfix
wrDict.PutByte ( (BYTE)iEditCode );
wrDict.PutBytes ( sKey, iAppendBytes );
// compute data length
// checkpoint list is delta-encoded, so size each delta, not each value
int iDataLen = ZippedIntSize ( dData[0] );
for ( int j = 1; j < dData.GetLength(); ++j )
iDataLen += ZippedIntSize ( dData[j] - dData[j - 1] );
// write data length, data
wrDict.ZipInt ( iDataLen );
wrDict.ZipInt ( dData[0] );
for ( int j = 1; j < dData.GetLength(); ++j )
wrDict.ZipInt ( dData[j] - dData[j - 1] );
// mark block end, restart deltas
iPrevKey = iIndex;
if ( ++iBlock == INFIX_BLOCK_SIZE )
{
iBlock = 0;
iPrevKey = -1;
wrDict.PutByte ( 0 );
}
}
// put end marker
if ( iBlock )
wrDict.PutByte ( 0 );
// now that m_dBlocksWords will no longer reallocate, resolve block key pointers
const char* pBlockWords = (const char*)m_dBlocksWords.Begin();
ARRAY_FOREACH ( i, m_dBlocks )
m_dBlocks[i].m_sInfix = pBlockWords + m_dBlocks[i].m_iInfixOffset;
if ( wrDict.GetPos() > UINT_MAX ) // FIXME!!! change to int64
sphDie ( "INTERNAL ERROR: dictionary size " INT64_FMT " overflow at infix save", wrDict.GetPos() );
}
/// append the infix checkpoint-block table to the dictionary;
/// returns the file offset where that table starts
template<int SIZE>
int64_t InfixBuilder_c<SIZE>::SaveEntryBlocks ( CSphWriter& wrDict )
{
	// save the blocks
	wrDict.PutBlob ( g_sTagInfixBlocks );
	SphOffset_t iInfixBlocksOffset = wrDict.GetPos();
	assert ( iInfixBlocksOffset <= INT_MAX );

	wrDict.ZipInt ( m_dBlocks.GetLength() );
	for ( int i = 0; i < m_dBlocks.GetLength(); ++i )
	{
		const InfixBlock_t& tBlock = m_dBlocks[i];
		auto iBytes = strlen ( tBlock.m_sInfix );
		wrDict.PutByte ( BYTE ( iBytes ) );
		wrDict.PutBytes ( tBlock.m_sInfix, iBytes );
		wrDict.ZipInt ( tBlock.m_iOffset ); // maybe delta these on top?
	}
	return iInfixBlocksOffset;
}
/// factory: pick the infix builder flavor by the tokenizer's max codepoint width
/// 0 means "no infixes"; anything above 3 bytes per codepoint is unsupported
std::unique_ptr<ISphInfixBuilder> sphCreateInfixBuilder ( int iCodepointBytes, CSphString* pError )
{
	assert ( pError );
	if ( iCodepointBytes == 0 )
		return nullptr;
	if ( iCodepointBytes == 1 )
		return std::make_unique<InfixBuilder_c<2>>(); // upto 6x1 bytes, 2 dwords, sbcs
	if ( iCodepointBytes == 2 )
		return std::make_unique<InfixBuilder_c<3>>(); // upto 6x2 bytes, 3 dwords, utf-8
	if ( iCodepointBytes == 3 )
		return std::make_unique<InfixBuilder_c<5>>(); // upto 6x3 bytes, 5 dwords, utf-8
	pError->SetSprintf ( "unhandled max infix codepoint size %d", iCodepointBytes );
	return nullptr;
}
| 13,444
|
C++
|
.cpp
| 443
| 27.259594
| 127
| 0.639328
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,987
|
gtests_stringbuilder.cpp
|
manticoresoftware_manticoresearch/src/gtests/gtests_stringbuilder.cpp
|
//
// Copyright (c) 2022-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org
//
#include <gtest/gtest.h>
#include "std/stringbuilder.h"
#include "std/string.h"
#include "std/scoped_comma.h"
#include "sphinxjson.h"
//////////////////////////////////////////////////////////////////////////
// test on usual +=, <<.
// covers the plain append surface of StringBuilder_c: +=, << for several
// argument types, and AppendChunk/AppendString with and without quotation
TEST ( functions, stringbuilder_hello )
{
StringBuilder_c builder;
// += of const char string
builder += "Hello";
// << for const char*
builder << " "
<< "world!";
ASSERT_STREQ ( builder.cstr(), "Hello world!" );
// << for CSphString
CSphString s ( "I am" );
builder << s;
ASSERT_STREQ ( builder.cstr(), "Hello world!I am" );
// << for vec of chars
CSphVector<char> dText;
dText.Add ( 'a' );
dText.Add ( 'b' );
dText.Add ( 'c' );
builder << dText;
ASSERT_STREQ ( builder.cstr(), "Hello world!I amabc" );
// AppendChunk of blob
const char* sText = "text";
builder.AppendChunk ( { sText, (int)strlen ( sText ) } );
ASSERT_STREQ ( builder.cstr(), "Hello world!I amabctext" );
// AppendChunk with quotation
builder.AppendChunk ( { sText, (int)strlen ( sText ) }, '`' );
ASSERT_STREQ ( builder.cstr(), "Hello world!I amabctext`text`" );
// AppendString
builder.AppendString ( s );
ASSERT_STREQ ( builder.cstr(), "Hello world!I amabctext`text`I am" );
// AppendString quoted
builder.AppendString ( s, '_' );
ASSERT_STREQ ( builder.cstr(), "Hello world!I amabctext`text`I am_I am_" );
}
// test for scoped-comma modifier.
// comma will automatically append '; ' between ops.
// a live ScopedComma_c injects '; ' between every append, regardless of
// whether the append came via +=, << or Appendf
TEST ( functions, stringbuilder_simplescoped )
{
StringBuilder_c builder;
auto tComma = ScopedComma_c ( builder, "; " );
builder += "one";
builder << "two"
<< "three";
builder.Appendf ( "four: %d", 4 );
builder << "five";
ASSERT_STREQ ( builder.cstr(), "one; two; three; four: 4; five" );
}
// scoped comma with prefix (will prepend prefix before first op)
// nested comma (will use other behaviour in isolated scope)
// a prefixed scoped comma emits its prefix lazily (before the first append);
// a nested scoped comma isolates its own separator/prefix/suffix, and the
// suffix is emitted when the nested scope is destroyed
TEST ( functions, stringbuilder_scopedprefixed )
{
StringBuilder_c builder;
auto tC = ScopedComma_c ( builder, "; ", "List: " );
builder += "one";
builder << "two"
<< "three";
ASSERT_STREQ ( builder.cstr(), "List: one; two; three" ) << "plain insert into managed";
{
auto tI = ScopedComma_c ( builder, ": ", "{", "}" );
builder << "first"
<< "second";
ASSERT_STREQ ( builder.cstr(), "List: one; two; three; {first: second" ) << "nested managed insert";
}
// leaving the inner scope appended its '}' suffix
ASSERT_STREQ ( builder.cstr(), "List: one; two; three; {first: second}" ) << "nested managed insert terminated";
builder.Appendf ( "four: %d", 4 );
builder << "five";
ASSERT_STREQ ( builder.cstr(), "List: one; two; three; {first: second}; four: 4; five" );
}
// standalone comma. Not necesssary related to stringbuilder, but live alone.
// a standalone Comma_c object emits nothing on first use and its separator
// on every following use, independently of any builder-managed scopes
TEST ( functions, stringbuilder_standalone )
{
	StringBuilder_c builder;
	Comma_c tSep ( ", " ); // default is ', '
	for ( const char* szWord : { "one", "two", "three" } )
		builder << tSep << szWord;
	ASSERT_STREQ ( builder.cstr(), "one, two, three" );
}
// standalone comma. Not necessary related to stringbuilder, but live alone.
// Sprint() and << routing for various pointer/array argument types
// NOTE: the expected "0x0000000000000000" output presumably assumes 8-byte
// pointers (16 hex digits) - would differ on a 32-bit build
TEST ( functions, stringbuilder_templated )
{
StringBuilder_c builder ( "," );
builder.Sprint ( "one", 3, " ", 4.34, "fine" );
EXPECT_STREQ ( builder.cstr(), "one,3, ,4.340000,fine" );
const char* szData = "hello";
builder << szData; // routed as const char*
const char szData1[] = "hello2";
builder << szData1; // routed as const char[]
CSphString sData2 = "hello3";
builder << sData2.cstr(); // routed as const char*
builder << "hello4"; // routed as const char[]
void* pVoid = nullptr;
builder << pVoid; // routed as const T*
int* pInt = nullptr;
builder << pInt; // routed as const T*
char* pChar = nullptr;
builder << pChar; // routed as const char*, and so, will output nothing
char pCharArr[10] = "fff\0ddd";
builder << (char*)pCharArr; // routed as const char*
builder.Sprint ( szData, szData1, sData2.cstr(), "hello4", pVoid, pInt, pChar, (char*)pCharArr );
builder << pCharArr << "aaa"; // fixme! routed as const char[]. So, tailing "ddd" and "aaa" will NOT be visible, as \0 is inside of pCharArr
ASSERT_STREQ ( builder.cstr(), "one,3, ,4.340000,fine,hello,hello2,hello3,hello4,0x0000000000000000,0x0000000000000000,fff,hello,hello2,hello3,hello4,0x0000000000000000,0x0000000000000000,fff,fff" );
}
// sugar: a temporary ScopedComma_c can be chained via Sink() to group a few
// appends with their own separator inside an outer json-object scope
TEST ( functions, StringBuilder_sugar )
{
static const struct
{
const char* name;
int value;
} datas[] =
{
{ "one", 1 },
{ "two", 2 },
{ "three", 3 },
{ "four", 4 },
{ "five", 5 },
{ "six", 6 },
{ "seven", 7 },
};
StringBuilder_c sBuf;
ScopedComma_c tComma ( sBuf, dJsonObj );
for ( const auto& data : datas )
{
// the temporary scope joins name/value with '='; it closes at the end of
// the statement, so the following "dl" lands back in the outer scope
ScopedComma_c ( sBuf, "=" ).Sink() << data.name << data.value;
sBuf << "dl";
}
sBuf.FinishBlocks();
ASSERT_STREQ ( sBuf.cstr(), "{one=1,dl,two=2,dl,three=3,dl,four=4,dl,five=5,dl,six=6,dl,seven=7,dl}" );
}
// standalone comma. Not necesssary related to stringbuilder, but live alone.
// numeric << overloads: int, long long, and float formatting
TEST ( functions, stringbuilder_numprint )
{
StringBuilder_c builder;
builder << "sa" << 10 << 10ll;
ASSERT_STREQ ( builder.cstr(), "sa1010" );
builder.Clear();
builder << 1.1f;
// floats are printed with six decimals, printf-style
ASSERT_STREQ ( builder.cstr(), "1.100000" );
}
// standalone comma. Not necesssary related to stringbuilder, but live alone.
// integers streamed one statement at a time concatenate exactly like a chain
TEST ( functions, stringbuilder_intprint )
{
	StringBuilder_c builder;
	const char* szPrefix = "sa";
	auto iNarrow = 10;
	auto iWide = 10ll;
	builder << szPrefix;
	builder << iNarrow;
	builder << iWide;
	ASSERT_STREQ ( builder.cstr(), "sa1010" );
}
// many nested scoped commas and 'StartBlock' modifier
// (scoped comma is the same as pair 'StartBlock...FinishBlock')
// mixing scoped commas with explicit StartBlock calls: destroying a scoped
// comma implicitly finishes every block opened after it (suffixes emitted in
// reverse order)
TEST ( functions, stringbuilder_nested )
{
StringBuilder_c builder;
builder << "one, two, three";
ScopedComma_c lev0 ( builder, ", " );
{
ScopedComma_c lev1 ( builder, ", ", "[", "]" );
builder.StartBlock ( ": ", "(", ")" );
builder.StartBlock ( ";", "{", "}" );
ASSERT_STREQ ( builder.cstr(), "one, two, three" ) << "simple blocks do nothing";
builder << "first"
<< "second"
<< "third";
ASSERT_STREQ ( builder.cstr(), "one, two, three[({first;second;third" ) << "unclosed block";
}
// note that only 'lev1' is destroyed, we didn't explicitly finished two nested blocks.
// but they're finished implicitly
ASSERT_STREQ ( builder.cstr(), "one, two, three[({first;second;third})]" ) << "closed block";
builder << "four";
// note, we doesn't destroy outer comma lev0, but this is not necessary since it doesn't have a suffix.
ASSERT_STREQ ( builder.cstr(), "one, two, three[({first;second;third})], four" ) << "finished block with tail";
}
// pure StartBlock..FinishBlock test
// explicit StartBlock/FinishBlock pairs; a builder constructed with
// (sep, prefix, suffix) behaves as an implicit outermost block
TEST ( functions, stringbuilder_autoclose )
{
StringBuilder_c builder ( ": ", "[", "]" );
// note that there is no ': ' suffixed at the end (since comma only between blocks)
builder << "one"
<< "two";
ASSERT_STREQ ( builder.cstr(), "[one: two" ) << "simple pushed block";
// starting block doesn't mean any output yet, so content is the same
builder.StartBlock ( dBracketsComma );
ASSERT_STREQ ( builder.cstr(), "[one: two" ) << "simple pushed block";
// note that now ': ' of outer block prepended to the suffix '(' of the current block.
builder << "abc"
<< "def";
ASSERT_STREQ ( builder.cstr(), "[one: two: (abc,def" ) << "simple pushed block 2";
// finishing block mean that suffix appended, if the state is different from initial
builder.FinishBlock();
ASSERT_STREQ ( builder.cstr(), "[one: two: (abc,def)" ) << "simple pushed block 2";
// second FinishBlock closes the implicit outermost block with ']'
builder.FinishBlock();
ASSERT_STREQ ( builder.cstr(), "[one: two: (abc,def)]" ) << "simple pushed block 3";
}
// pure StartBlock..FinishBlock test with one empty block (it outputs nothing)
// finishing a block that never received any content emits neither its
// prefix nor its suffix
TEST ( functions, stringbuilder_close_of_empty )
{
StringBuilder_c builder ( ": ", "[", "]" );
// note that there is no ': ' suffixed at the end (since comma only between blocks)
builder << "one"
<< "two";
ASSERT_STREQ ( builder.cstr(), "[one: two" ) << "simple pushed block";
// starting block doesn't output anything by itself, but modify future output
builder.StartBlock ( dBracketsComma );
ASSERT_STREQ ( builder.cstr(), "[one: two" ) << "started new block";
// finishing of empty block outputs also nothing
builder.FinishBlock();
ASSERT_STREQ ( builder.cstr(), "[one: two" ) << "finished empty block";
// finishing non-empty block outputs suffix (and so, doesn't strictly necessary if no suffixes).
builder.FinishBlock();
ASSERT_STREQ ( builder.cstr(), "[one: two]" ) << "final result";
}
// operation 'clear'. Not only wipe content, but also undo any comma state
TEST ( functions, stringbuilder_clear )
{
StringBuilder_c builder ( ": ", "[", "]" );
builder << "one"
<< "two";
builder.StartBlock ( dBracketsComma );
builder << "abc"
<< "def";
builder.Clear();
ASSERT_STREQ ( builder.cstr(), "" ) << "emtpy";
builder << "one"
<< "two";
ASSERT_STREQ ( builder.cstr(), "onetwo" ) << "nocommas";
builder.FinishBlocks();
ASSERT_STREQ ( builder.cstr(), "onetwo" ) << "nocommas";
}
// 'FinishBlocks()' - by default closes ALL opened blocks
// FinishBlocks() with no argument closes ALL currently opened blocks,
// including the implicit one from the builder's constructor
TEST ( functions, stringbuilder_twoopenoneclose )
{
StringBuilder_c builder ( ": ", "[", "]" );
builder << "one"
<< "two";
builder.StartBlock ( dBracketsComma );
builder << "abc"
<< "def";
builder.FinishBlocks();
ASSERT_STREQ ( builder.cstr(), "[one: two: (abc,def)]" ) << "simple pushed block 3";
}
// simple start/finish blocks manipulation - outputs nothing by alone
// starting and finishing blocks without ever appending content produces
// no output at all (prefixes/suffixes are lazy)
TEST ( functions, stringbuilder_finishnoopen )
{
	StringBuilder_c builder ( ":", "[", "]" );
	auto pLev = builder.StartBlock ( ";", "(", ")" );
	builder.StartBlock ( dJsonObj );
	builder.FinishBlocks ( pLev );
	// fixed diagnostic message typo: was "outputed"
	ASSERT_STREQ ( builder.cstr(), "" ) << "nothing output";
}
// FinishBlocks() to stored state
// FinishBlocks(pLev) closes the memorized block and every block opened after
// it, returning the builder to the state it had at the memorized level
TEST ( functions, stringbuilder_ret_to_level )
{
// outer block
StringBuilder_c builder ( ":", "[", "]" );
builder << "exone"
<< "extwo";
// middle block - we memorize this state
auto pLev = builder.StartBlock ( ";", "(", ")" );
builder << "one"
<< "two";
// internal block
builder.StartBlock ( dJsonObj );
builder << "three"
<< "four";
ASSERT_STREQ ( builder.cstr(), "[exone:extwo:(one;two;{three,four" );
// finish memorized block and all blocks created after it
builder.FinishBlocks ( pLev );
ASSERT_STREQ ( builder.cstr(), "[exone:extwo:(one;two;{three,four})" );
// it will output into most outer block, since others finished
builder << "ex3";
ASSERT_STREQ ( builder.cstr(), "[exone:extwo:(one;two;{three,four}):ex3" );
// it will finish outer block (and close the bracket).
builder.FinishBlocks();
ASSERT_STREQ ( builder.cstr(), "[exone:extwo:(one;two;{three,four}):ex3]" ) << "test complete";
}
// simple test on Appendf
// simple test on Appendf
// NOTE(review): the test name has a typo ("strinbguilder"); left as-is since
// renaming would change the registered gtest name used by --gtest_filter
TEST ( functions, strinbguilder_appendf )
{
StringBuilder_c sRes;
sRes.Appendf ( "12345678" );
ASSERT_STREQ ( sRes.cstr(), "12345678" );
sRes.Appendf ( "this is my rifle this is my gun" );
ASSERT_STREQ ( sRes.cstr(), "12345678this is my rifle this is my gun" );
// printf-style formatting with mixed argument types
sRes.Appendf ( " int=%d float=%f string=%s", 123, 456.789, "helloworld" );
ASSERT_STREQ (
sRes.cstr(), "12345678this is my rifle this is my gun int=123 float=456.789000 string=helloworld" );
}
// escaped builder flavor that single-quotes and backslash-escapes appended text
using QuotationEscapedBuilder = EscapedStringBuilder_T<BaseQuotation_T<EscapeQuotator_t>>;
// AppendEscaped in all four modes (eNone/eFixupSpace/eEscape/eAll=both),
// for plain strings, nullptr, empty strings, and length-limited blobs
TEST ( functions, EscapedStringBuilder )
{
QuotationEscapedBuilder tBuilder;
tBuilder.AppendEscaped ( "Hello" );
ASSERT_STREQ ( tBuilder.cstr(), "'Hello'" );
tBuilder.AppendEscaped ( " wo\\rl\'d" );
ASSERT_STREQ ( tBuilder.cstr(), "'Hello'' wo\\\\rl\\'d'" );
tBuilder.Clear();
tBuilder.AppendEscaped ( "wo\\rl\'d", EscBld::eFixupSpace );
ASSERT_STREQ ( tBuilder.cstr(), "wo\\rl\'d" );
// generic const char* with different escapes
tBuilder.Clear();
tBuilder.AppendEscaped ( "space\t and\r 'tab'\n here", EscBld::eNone );
ASSERT_STREQ ( tBuilder.cstr(), "space\t and\r 'tab'\n here" );
tBuilder.Clear();
tBuilder.AppendEscaped ( "space\t and\r 'tab'\n here", EscBld::eFixupSpace );
ASSERT_STREQ ( tBuilder.cstr(), "space and 'tab' here" );
tBuilder.Clear();
tBuilder.AppendEscaped ( "space\t and\r 'tab'\n here", EscBld::eEscape );
ASSERT_STREQ ( tBuilder.cstr(), "'space\t and\r \\'tab\\'\n here'" );
tBuilder.Clear();
tBuilder.AppendEscaped ( "space\t and\r 'tab'\n here" );
ASSERT_STREQ ( tBuilder.cstr(), "'space and \\'tab\\' here'" );
// nullptr with different escapes
// (escaping modes still emit the surrounding quotes for null input)
tBuilder.Clear();
tBuilder.AppendEscaped ( nullptr, EscBld::eNone );
ASSERT_STREQ ( tBuilder.cstr(), "" );
tBuilder.Clear();
tBuilder.AppendEscaped ( nullptr, EscBld::eFixupSpace );
ASSERT_STREQ ( tBuilder.cstr(), "" );
tBuilder.Clear();
tBuilder.AppendEscaped ( nullptr, EscBld::eEscape );
ASSERT_STREQ ( tBuilder.cstr(), "''" );
tBuilder.Clear();
tBuilder.AppendEscaped ( nullptr, EscBld::eAll );
ASSERT_STREQ ( tBuilder.cstr(), "''" );
// empty with different escapes
tBuilder.Clear();
tBuilder.AppendEscaped ( "", EscBld::eNone );
ASSERT_STREQ ( tBuilder.cstr(), "" );
tBuilder.Clear();
tBuilder.AppendEscaped ( "", EscBld::eFixupSpace );
ASSERT_STREQ ( tBuilder.cstr(), "" );
tBuilder.Clear();
tBuilder.AppendEscaped ( "", EscBld::eEscape );
ASSERT_STREQ ( tBuilder.cstr(), "''" );
tBuilder.Clear();
tBuilder.AppendEscaped ( "", EscBld::eAll );
ASSERT_STREQ ( tBuilder.cstr(), "''" );
// len-defined blob
tBuilder.Clear();
tBuilder.AppendEscaped ( "space\t and\r 'tab'\n here", EscBld::eNone, 10 );
ASSERT_STREQ ( tBuilder.cstr(), "space\t and" );
tBuilder.Clear();
tBuilder.AppendEscaped ( "space\t and\r 'tab'\n here", EscBld::eFixupSpace, 10 );
ASSERT_STREQ ( tBuilder.cstr(), "space and" );
tBuilder.Clear();
tBuilder.AppendEscaped ( "space\t and\r 'tab'\n here", EscBld::eEscape, 10 );
ASSERT_STREQ ( tBuilder.cstr(), "'space\t and'" );
tBuilder.Clear();
tBuilder.AppendEscaped ( "space\t and\r 'tab'\n here", EscBld::eAll, 10 );
ASSERT_STREQ ( tBuilder.cstr(), "'space and'" );
// zero-len blob
tBuilder.Clear();
tBuilder.AppendEscaped ( "space\t and\r 'tab'\n here", EscBld::eNone, 0 );
ASSERT_STREQ ( tBuilder.cstr(), "" );
tBuilder.Clear();
tBuilder.AppendEscaped ( "space\t and\r 'tab'\n here", EscBld::eFixupSpace, 0 );
ASSERT_STREQ ( tBuilder.cstr(), "" );
tBuilder.Clear();
tBuilder.AppendEscaped ( "space\t and\r 'tab'\n here", EscBld::eEscape, 0 );
ASSERT_STREQ ( tBuilder.cstr(), "''" );
tBuilder.Clear();
tBuilder.AppendEscaped ( "space\t and\r 'tab'\n here", EscBld::eAll, 0 );
ASSERT_STREQ ( tBuilder.cstr(), "''" );
// len-defined blob exactly of given len, non z-terminated.
// (valgrind would check nicely if it even try to touch a byte over allocated buf)
char* buf = new char[5];
memcpy ( buf, "space", 5 );
tBuilder.Clear();
tBuilder.AppendEscaped ( buf, EscBld::eNone, 5 );
ASSERT_STREQ ( tBuilder.cstr(), "space" );
tBuilder.Clear();
tBuilder.AppendEscaped ( buf, EscBld::eFixupSpace, 5 );
ASSERT_STREQ ( tBuilder.cstr(), "space" );
tBuilder.Clear();
tBuilder.AppendEscaped ( buf, EscBld::eEscape, 5 );
ASSERT_STREQ ( tBuilder.cstr(), "'space'" );
tBuilder.Clear();
tBuilder.AppendEscaped ( buf, EscBld::eAll, 5 );
ASSERT_STREQ ( tBuilder.cstr(), "'space'" );
delete[] buf;
}
// Boundary check for AppendEscaped with an explicit length argument:
// the blob is exactly iLen bytes and NOT NUL-terminated, so reading one
// byte past it would be an out-of-bounds access (valgrind/ASAN would flag it).
TEST ( functions, EscapedStringBuilderbounds )
{
	QuotationEscapedBuilder tBuilder;

	// len-defined blob exactly of given len, non z-terminated.
	// (valgrind would check nicely if it even try to touch a byte over allocated buf)
	tBuilder.Clear();
	tBuilder.AppendEscaped ( "space", EscBld::eAll, 5 );
	ASSERT_STREQ ( tBuilder.cstr(), "'space'" );
}
void esc_first_comma ( const char* sText, BYTE eKind, const char* sProof )
{
QuotationEscapedBuilder tBuilder;
tBuilder.StartBlock();
tBuilder << "first";
tBuilder.AppendEscaped ( sText, eKind );
ASSERT_STREQ ( tBuilder.cstr(), sProof ) << (int)eKind;
}
// AppendEscaped after a previous token inside a block: a ", " separator must
// precede the appended text. Covers all four escape modes for both a normal
// C string and a nullptr source (nullptr yields nothing for eNone/eFixupSpace,
// but an empty quoted string '' for the escaping modes).
TEST ( functions, EscapedStringBuilderAndCommas )
{
	// generic const char* with different escapes
	esc_first_comma ( "space\t and\r 'tab'\n here", EscBld::eNone, "first, space\t and\r 'tab'\n here" );
	esc_first_comma ( "space\t and\r 'tab'\n here", EscBld::eFixupSpace, "first, space and 'tab' here" );
	esc_first_comma ( "space\t and\r 'tab'\n here", EscBld::eEscape, "first, 'space\t and\r \\'tab\\'\n here'" );
	esc_first_comma ( "space\t and\r 'tab'\n here", EscBld::eAll, "first, 'space and \\'tab\\' here'" );

	// null with different escapes
	esc_first_comma ( nullptr, EscBld::eNone, "first" );
	esc_first_comma ( nullptr, EscBld::eFixupSpace, "first" );
	esc_first_comma ( nullptr, EscBld::eEscape, "first, ''" );
	esc_first_comma ( nullptr, EscBld::eAll, "first, ''" );
}
// StringBuilder_c block/name machinery: comma separators, "name": prefixes via
// AppendName(), nested blocks via ScopedComma_c / StartBlock / FinishBlock, and
// SkipNextComma(). Each assertion checks the cumulative builder content, so the
// statement order is part of the test.
TEST ( functions, JsonNamedEssence )
{
	// top-level block: ',' separator, '{' prefix, '}' suffix
	StringBuilder_c sRes ( ",", "{", "}" );
	sRes << "hello";
	ASSERT_STREQ ( sRes.cstr(), "{hello" );
	sRes << "world";
	ASSERT_STREQ ( sRes.cstr(), "{hello,world" );
	// AppendName emits a comma (if needed) plus a quoted "name": prefix
	sRes.AppendName ( "bla" );
	ASSERT_STREQ ( sRes.cstr(), "{hello,world,\"bla\":" );
	sRes << "foo";
	ASSERT_STREQ ( sRes.cstr(), "{hello,world,\"bla\":foo" );
	sRes << "bar";
	ASSERT_STREQ ( sRes.cstr(), "{hello,world,\"bla\":foo,bar" );
	sRes.AppendName ( "bar" ).Sprintf ( "%d", 1000 );
	ASSERT_STREQ ( sRes.cstr(), "{hello,world,\"bla\":foo,bar,\"bar\":1000" );
	// nested scoped block with ';' separator and [] brackets
	ScopedComma_c sOne ( sRes, ";", "[", "]" );
	sRes.AppendName ( "foo" ) << "bar";
	ASSERT_STREQ ( sRes.cstr(), "{hello,world,\"bla\":foo,bar,\"bar\":1000,[\"foo\":bar" );
	// SkipNextComma suppresses exactly one upcoming separator
	sRes.SkipNextComma();
	sRes << "baz";
	ASSERT_STREQ ( sRes.cstr(), "{hello,world,\"bla\":foo,bar,\"bar\":1000,[\"foo\":barbaz" );
	sRes << "end";
	ASSERT_STREQ ( sRes.cstr(), "{hello,world,\"bla\":foo,bar,\"bar\":1000,[\"foo\":barbaz;end" );
	sRes.FinishBlock();
	ASSERT_STREQ ( sRes.cstr(), "{hello,world,\"bla\":foo,bar,\"bar\":1000,[\"foo\":barbaz;end]" );
	sRes << "End";
	ASSERT_STREQ ( sRes.cstr(), "{hello,world,\"bla\":foo,bar,\"bar\":1000,[\"foo\":barbaz;end],End" );
	sRes.AppendName ( "arr" );
	ASSERT_STREQ ( sRes.cstr(), "{hello,world,\"bla\":foo,bar,\"bar\":1000,[\"foo\":barbaz;end],End,\"arr\":" );
	// an opened-but-empty block: brackets appear only on FinishBlock(false)
	sRes.StartBlock ( "|", "[", "]" );
	ASSERT_STREQ ( sRes.cstr(), "{hello,world,\"bla\":foo,bar,\"bar\":1000,[\"foo\":barbaz;end],End,\"arr\":" );
	sRes.FinishBlock ( false );
	ASSERT_STREQ ( sRes.cstr(), "{hello,world,\"bla\":foo,bar,\"bar\":1000,[\"foo\":barbaz;end],End,\"arr\":[]" );
	// non-empty named nested block
	sRes.AppendName ( "a" ).StartBlock ( "|", "[", "]" );
	sRes << "b";
	sRes.FinishBlock();
	ASSERT_STREQ ( sRes.cstr(), "{hello,world,\"bla\":foo,bar,\"bar\":1000,[\"foo\":barbaz;end],End,\"arr\":[],\"a\":[b]" );
	// closing the outermost block appends the '}' suffix
	sRes.FinishBlock();
	ASSERT_STREQ ( sRes.cstr(), "{hello,world,\"bla\":foo,bar,\"bar\":1000,[\"foo\":barbaz;end],End,\"arr\":[],\"a\":[b]}" );
}
// Same as EscapedStringBuilderAndCommas, but with EscBld::eSkipComma OR-ed in:
// the block separator before the appended text must be suppressed, so the
// escaped payload is glued directly after "first".
TEST ( functions, EscapedStringBuilderAndSkipCommas )
{
	// generic const char* with different escapes, exclude comma
	esc_first_comma ( "space\t and\r 'tab'\n here", EscBld::eNone | EscBld::eSkipComma, "firstspace\t and\r 'tab'\n here" );
	esc_first_comma ( "space\t and\r 'tab'\n here", EscBld::eFixupSpace | EscBld::eSkipComma, "firstspace and 'tab' here" );
	esc_first_comma ( "space\t and\r 'tab'\n here", EscBld::eEscape | EscBld::eSkipComma, "first'space\t and\r \\'tab\\'\n here'" );
	esc_first_comma ( "space\t and\r 'tab'\n here", EscBld::eAll | EscBld::eSkipComma, "first'space and \\'tab\\' here'" );

	// null with different escapes, exclude comma
	esc_first_comma ( nullptr, EscBld::eNone | EscBld::eSkipComma, "first" );
	esc_first_comma ( nullptr, EscBld::eFixupSpace | EscBld::eSkipComma, "first" );
	esc_first_comma ( nullptr, EscBld::eEscape | EscBld::eSkipComma, "first''" );
	esc_first_comma ( nullptr, EscBld::eAll | EscBld::eSkipComma, "first''" );
}
// JsonEscapedBuilder convenience API: RAII helpers (Array/Object/Named return
// scope guards that close the block on destruction), .Sink() for immediate
// streaming, and *Block() variants closed manually via FinishBlock(s). The
// expected strings encode exactly when separators/brackets are emitted, so
// scope boundaries here are load-bearing.
TEST ( functions, JsonEscapedBuilder_sugar )
{
	JsonEscapedBuilder tOut;

	// scoped name: tNamed guard holds the "test1": context open until scope end
	{
		auto _ = tOut.Array();
		auto tNamed = tOut.Named ( "test1" );
		tOut << "one"
			<< "two";
		tOut.AppendEscaped ( "blabla" );
	};
	EXPECT_STREQ ( tOut.cstr(), "[\"test1\":onetwo\"blabla\"]" );

	// scoped immediate name: .Sink() streams into the named slot right away;
	// note the comma before "blabla" (named context already closed)
	tOut.Clear();
	{
		auto _ = tOut.Array();
		tOut.Named ( "test1" ).Sink() << "one"
			<< "two";
		tOut.AppendEscaped ( "blabla" );
	};
	EXPECT_STREQ ( tOut.cstr(), "[\"test1\":onetwo,\"blabla\"]" );

	// block name: NamedBlock() opened manually, closed with FinishBlock()
	tOut.Clear();
	{
		auto _ = tOut.Array();
		tOut.NamedBlock ( "test1" );
		tOut << "one"
			<< "two";
		tOut.AppendEscaped ( "blabla" );
		tOut.FinishBlock();
	}
	EXPECT_STREQ ( tOut.cstr(), "[\"test1\":onetwo\"blabla\"]" );

	// scoped object
	tOut.Clear();
	{
		auto tObj = tOut.Object();
		tOut.Named ( "val1" ).Sink() << 1;
		tOut.Named ( "val2" ).Sink() << 2;
	}
	EXPECT_STREQ ( tOut.cstr(), "{\"val1\":1,\"val2\":2}" );

	// scoped immediate object: whole object built in one chained expression
	tOut.Clear();
	( tOut.Object().Sink().AppendName ( "val1" ) << 1 ).AppendName ( "val2" ) << 2;
	EXPECT_STREQ ( tOut.cstr(), "{\"val1\":1,\"val2\":2}" );

	// block object, closed via FinishBlocks()
	tOut.Clear();
	tOut.ObjectBlock();
	tOut.Named ( "val1" ).Sink() << 1;
	tOut.Named ( "val2" ).Sink() << 2;
	tOut.FinishBlocks();
	EXPECT_STREQ ( tOut.cstr(), "{\"val1\":1,\"val2\":2}" );

	// scoped array
	tOut.Clear();
	{
		auto tObj = tOut.Array();
		tOut << 1 << 2 << 3 << 4;
	}
	EXPECT_STREQ ( tOut.cstr(), "[1,2,3,4]" );

	// scoped immediate array
	tOut.Clear();
	tOut.Array().Sink() << 1 << 2 << 3 << 4;
	EXPECT_STREQ ( tOut.cstr(), "[1,2,3,4]" );

	// block array
	tOut.Clear();
	{
		auto _ = tOut.Array();
		tOut << 1 << 2 << 3 << 4;
	}
	EXPECT_STREQ ( tOut.cstr(), "[1,2,3,4]" );

	// scoped immediate warray ("wide" array: newline-separated elements)
	tOut.Clear();
	tOut.ArrayW().Sink() << 1 << 2 << 3 << 4;
	EXPECT_STREQ ( tOut.cstr(), "[\n1,\n2,\n3,\n4\n]" );

	// control characters: bytes 0x00..0x1f must come out as JSON \uXXXX or the
	// short forms \b \t \n \f \r; first call needs an explicit length because
	// the blob starts with a NUL byte
	tOut.Clear();
	tOut.AppendEscaped ( "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", EscBld::eAll, 16 );
	EXPECT_STREQ ( tOut.cstr(), "\"\\u0000\\u0001\\u0002\\u0003\\u0004\\u0005\\u0006\\u0007\\b\\t\\n\\u000b\\f\\r\\u000e\\u000f\"" );
	tOut.Clear();
	tOut.AppendEscaped ( "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" );
	EXPECT_STREQ ( tOut.cstr(), "\"\\u0010\\u0011\\u0012\\u0013\\u0014\\u0015\\u0016\\u0017\\u0018\\u0019\\u001a\\u001b\\u001c\\u001d\\u001e\\u001f\"" );
}
| 22,400
|
C++
|
.cpp
| 563
| 37.609236
| 200
| 0.657081
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,988
|
gtests_pqstuff.cpp
|
manticoresoftware_manticoresearch/src/gtests/gtests_pqstuff.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include <gtest/gtest.h>
#include "sphinxint.h"
#include "sphinxpq.h"
// Fixture for PercolateMergeResults() tests: three per-thread match contexts
// with known statistics, a pool of ten query descriptors they reference, and
// a result object preset to m_iTotalQueries = 10. The exact numbers below are
// the expected values asserted by the TEST_F cases, so they must not drift.
class PQ_merge : public ::testing::Test
{

protected:
	virtual void SetUp ()
	{
		// context 0: matched queries #0,#1 (DT 1000,1001), doc list {2,10,11,1,12}
		dContexts[0].m_iDocsMatched = 3;
		dContexts[0].m_iEarlyPassed = 1;
		dContexts[0].m_iOnlyTerms = 1;
		dContexts[0].m_iQueriesFailed = 1;
		for ( auto i : { 0, 1, } )
		{
			dContexts[0].m_dQueryMatched.Add ( dQueries[i] );
			dContexts[0].m_dDt.Add ( i+1000 );
		}
		for ( auto i : {2, 10, 11, 1, 12}) dContexts[0].m_dDocsMatched.Add ( i );

		// context 1: matched queries #2,#3 (DT 2002,2003), doc list {1,21,1,22}
		dContexts[1].m_iDocsMatched = 2;
		dContexts[1].m_iEarlyPassed = 1;
		dContexts[1].m_iOnlyTerms = 1;
		dContexts[1].m_iQueriesFailed = 0;
		for ( auto i : { 2, 3, } )
		{
			dContexts[1].m_dQueryMatched.Add ( dQueries[i] );
			dContexts[1].m_dDt.Add ( i + 2000 );
		}
		for ( auto i : { 1, 21, 1, 22 } ) dContexts[1].m_dDocsMatched.Add ( i );

		// context 2: matched queries #8,#9 (DT 3008,3009), doc list {1,31,1,32}
		dContexts[2].m_iDocsMatched = 2;
		dContexts[2].m_iEarlyPassed = 1;
		dContexts[2].m_iOnlyTerms = 0;
		dContexts[2].m_iQueriesFailed = 0;
		for ( auto i : { 8, 9, } )
		{
			dContexts[2].m_dQueryMatched.Add ( dQueries[i] );
			dContexts[2].m_dDt.Add ( i + 3000 );
		}
		for ( auto i : { 1, 31, 1, 32 } ) dContexts[2].m_dDocsMatched.Add ( i );

		dResult.m_iTotalQueries = 10;
	}

	// shared pool of query descriptors: QUID, query, tags, filters, QL flag
	PercolateQueryDesc dQueries[10] {
		{ 100, "query0", "tag0", "filter0", false }
		, { 101, "query1", "tag1", "filter1", false }
		, { 102, "query2", "tag2", "filter2", false }
		, { 103, "query3", "tag3", "filter3", false }
		, { 104, "query4", "tag4", "filter4", false }
		, { 150, "query5", "tag5", "filter5", false }
		, { 160, "query6", "tag6", "filter6", false }
		, { 170, "query7", "tag7", "filter7", false }
		, { 180, "query8", "tag8", "filter8", false }
		, { 190, "query9", "tag9", "filter9", false } };

	PQMatchContextResult_t dContexts[3];	// per-thread partial results to merge
	PercolateMatchResult_t dResult;			// merge destination
};
//////////////////////////////////////////////////////////////////////////
// one simple result, without details
// Merge a single context with default flags (no docs, no verbose): aggregate
// counters are filled, but the doc list and per-query timings stay empty.
TEST_F ( PQ_merge, JustOneResult )
{
	CSphVector<PQMatchContextResult_t *> dSrc;
	dSrc.Add ( &dContexts[0] );
	PercolateMergeResults ( dSrc, dResult );

	// general numbers
	ASSERT_EQ ( dResult.m_iEarlyOutQueries, 9 );
	ASSERT_EQ ( dResult.m_iOnlyTerms, 1 );
	ASSERT_EQ ( dResult.m_iQueriesFailed, 1 );
	ASSERT_EQ ( dResult.m_iQueriesMatched, 2 );
	ASSERT_EQ ( dResult.m_iDocsMatched, 3 );

	// ensure that without verbosity we have no DT
	ASSERT_FALSE ( dResult.m_bVerbose );
	ASSERT_TRUE ( dResult.m_dQueryDT.IsEmpty() );

	// ensure that if no docs requested, nothing returned
	ASSERT_FALSE ( dResult.m_bGetDocs );
	ASSERT_TRUE ( dResult.m_dDocs.IsEmpty () );

	// ensure we take correct matches
	ASSERT_EQ ( dResult.m_dQueryDesc.GetLength(), dResult.m_iQueriesMatched );
	ASSERT_EQ ( dResult.m_dQueryDesc[0].m_iQUID, 100 );
	ASSERT_EQ ( dResult.m_dQueryDesc[1].m_iQUID, 101 );
}
// one result, but with list of docs
// Merge a single context with m_bGetDocs set: the packed doc list of context 0
// must be copied through verbatim; timings still absent.
TEST_F ( PQ_merge, JustOneResultWithDocs )
{
	CSphVector<PQMatchContextResult_t *> dSrc;
	dSrc.Add ( &dContexts[0] );
	dResult.m_bGetDocs = true;
	PercolateMergeResults ( dSrc, dResult );

	// general numbers
	ASSERT_EQ ( dResult.m_iEarlyOutQueries, 9 );
	ASSERT_EQ ( dResult.m_iOnlyTerms, 1 );
	ASSERT_EQ ( dResult.m_iQueriesFailed, 1 );
	ASSERT_EQ ( dResult.m_iQueriesMatched, 2 );
	ASSERT_EQ ( dResult.m_iDocsMatched, 3 );

	// ensure that without verbosity we have no DT
	ASSERT_FALSE ( dResult.m_bVerbose );
	ASSERT_TRUE ( dResult.m_dQueryDT.IsEmpty () );

	// docs were requested; doc list must match context 0's packed list
	ASSERT_TRUE ( dResult.m_bGetDocs );
	ASSERT_EQ ( dResult.m_dDocs.GetLength (), 5 );
	int j = 0; for ( auto i : { 2, 10, 11, 1, 12 }) ASSERT_EQ ( dResult.m_dDocs[j++], i);

	// ensure we take correct matches
	ASSERT_EQ ( dResult.m_dQueryDesc.GetLength (), dResult.m_iQueriesMatched );
	ASSERT_EQ ( dResult.m_dQueryDesc[0].m_iQUID, 100 );
	ASSERT_EQ ( dResult.m_dQueryDesc[1].m_iQUID, 101 );
}
// one result, but with times
// Merge a single context with m_bVerbose set: per-query DT timings (1000,1001)
// must be carried over; the doc list stays empty.
TEST_F ( PQ_merge, JustOneResultVerbose )
{
	CSphVector<PQMatchContextResult_t *> dSrc;
	dSrc.Add ( &dContexts[0] );
	dResult.m_bVerbose = true;
	PercolateMergeResults ( dSrc, dResult );

	// general numbers
	ASSERT_EQ ( dResult.m_iEarlyOutQueries, 9 );
	ASSERT_EQ ( dResult.m_iOnlyTerms, 1 );
	ASSERT_EQ ( dResult.m_iQueriesFailed, 1 );
	ASSERT_EQ ( dResult.m_iQueriesMatched, 2 );
	ASSERT_EQ ( dResult.m_iDocsMatched, 3 );

	// verbose: one DT entry per matched query
	ASSERT_TRUE ( dResult.m_bVerbose );
	ASSERT_EQ ( dResult.m_dQueryDT.GetLength (), dResult.m_iQueriesMatched );
	ASSERT_EQ ( dResult.m_dQueryDT[0], 1000 );
	ASSERT_EQ ( dResult.m_dQueryDT[1], 1001 );

	// ensure that if no docs requested, nothing returned
	ASSERT_FALSE ( dResult.m_bGetDocs );
	ASSERT_TRUE ( dResult.m_dDocs.IsEmpty () );

	// ensure we take correct matches
	ASSERT_EQ ( dResult.m_dQueryDesc.GetLength (), dResult.m_iQueriesMatched );
	ASSERT_EQ ( dResult.m_dQueryDesc[0].m_iQUID, 100 );
	ASSERT_EQ ( dResult.m_dQueryDesc[1].m_iQUID, 101 );
}
// one result, with everything
// Merge a single context with both m_bVerbose and m_bGetDocs: timings AND the
// packed doc list must both be present and correct.
TEST_F ( PQ_merge, JustOneResultWithEverything )
{
	CSphVector<PQMatchContextResult_t *> dSrc;
	dSrc.Add ( &dContexts[0] );
	dResult.m_bVerbose = true;
	dResult.m_bGetDocs = true;
	PercolateMergeResults ( dSrc, dResult );

	// general numbers
	ASSERT_EQ ( dResult.m_iEarlyOutQueries, 9 );
	ASSERT_EQ ( dResult.m_iOnlyTerms, 1 );
	ASSERT_EQ ( dResult.m_iQueriesFailed, 1 );
	ASSERT_EQ ( dResult.m_iQueriesMatched, 2 );
	ASSERT_EQ ( dResult.m_iDocsMatched, 3 );

	// verbose: one DT entry per matched query
	ASSERT_TRUE ( dResult.m_bVerbose );
	ASSERT_EQ ( dResult.m_dQueryDT.GetLength (), dResult.m_iQueriesMatched );
	ASSERT_EQ ( dResult.m_dQueryDT[0], 1000 );
	ASSERT_EQ ( dResult.m_dQueryDT[1], 1001 );

	// docs were requested; doc list must match context 0's packed list
	ASSERT_TRUE ( dResult.m_bGetDocs );
	ASSERT_EQ ( dResult.m_dDocs.GetLength (), 5 );
	int j = 0;
	for ( auto i : { 2, 10, 11, 1, 12 } )
		ASSERT_EQ ( dResult.m_dDocs[j++], i );

	// ensure we take correct matches
	ASSERT_EQ ( dResult.m_dQueryDesc.GetLength (), dResult.m_iQueriesMatched );
	ASSERT_EQ ( dResult.m_dQueryDesc[0].m_iQUID, 100 );
	ASSERT_EQ ( dResult.m_dQueryDesc[1].m_iQUID, 101 );
}
// full result, with everything
// Merge all three contexts (deliberately out of order: 2, 0, 1) with default
// flags: counters are summed and matched query descriptors come out ordered
// by ascending QUID regardless of source order.
TEST_F ( PQ_merge, FullResult )
{
	CSphVector<PQMatchContextResult_t *> dSrc;
	dSrc.Add ( &dContexts[2] );
	dSrc.Add ( &dContexts[0] );
	dSrc.Add ( &dContexts[1] );
	PercolateMergeResults ( dSrc, dResult );

	// general numbers (sums over the three contexts)
	ASSERT_EQ ( dResult.m_iEarlyOutQueries, 7 );
	ASSERT_EQ ( dResult.m_iOnlyTerms, 2 );
	ASSERT_EQ ( dResult.m_iQueriesFailed, 1 );
	ASSERT_EQ ( dResult.m_iQueriesMatched, 6 );
	ASSERT_EQ ( dResult.m_iDocsMatched, 7 );

	// ensure that without verbosity we have no DT
	ASSERT_FALSE ( dResult.m_bVerbose );
	ASSERT_TRUE ( dResult.m_dQueryDT.IsEmpty () );

	// ensure that if no docs requested, nothing returned
	ASSERT_FALSE ( dResult.m_bGetDocs );
	ASSERT_TRUE ( dResult.m_dDocs.IsEmpty () );

	// matched descriptors, sorted by QUID
	ASSERT_EQ ( dResult.m_dQueryDesc.GetLength (), dResult.m_iQueriesMatched );
	int j = 0;
	for ( auto qid : { 100, 101, 102, 103, 180, 190 } )
		ASSERT_EQ ( dResult.m_dQueryDesc[j++].m_iQUID, qid );
}
// full result, with everything
// Merge all three contexts with m_bGetDocs: doc lists are concatenated in
// QUID order (ctx0, ctx1, ctx2), not in the order the contexts were added.
TEST_F ( PQ_merge, FullResultWithDocs )
{
	CSphVector<PQMatchContextResult_t *> dSrc;
	dSrc.Add ( &dContexts[2] );
	dSrc.Add ( &dContexts[0] );
	dSrc.Add ( &dContexts[1] );
	dResult.m_bGetDocs = true;
	PercolateMergeResults ( dSrc, dResult );

	// general numbers (sums over the three contexts)
	ASSERT_EQ ( dResult.m_iEarlyOutQueries, 7 );
	ASSERT_EQ ( dResult.m_iOnlyTerms, 2 );
	ASSERT_EQ ( dResult.m_iQueriesFailed, 1 );
	ASSERT_EQ ( dResult.m_iQueriesMatched, 6 );
	ASSERT_EQ ( dResult.m_iDocsMatched, 7 );

	// ensure that without verbosity we have no DT
	ASSERT_FALSE ( dResult.m_bVerbose );
	ASSERT_TRUE ( dResult.m_dQueryDT.IsEmpty () );

	// docs requested: concatenation of the three packed doc lists
	ASSERT_TRUE ( dResult.m_bGetDocs );
	ASSERT_EQ ( dResult.m_dDocs.GetLength (), 13 );
	int j = 0;
	for ( auto i : { 2, 10, 11, 1, 12, 1, 21, 1, 22, 1, 31, 1, 32 } )
		ASSERT_EQ ( dResult.m_dDocs[j++], i );

	// matched descriptors, sorted by QUID
	ASSERT_EQ ( dResult.m_dQueryDesc.GetLength (), dResult.m_iQueriesMatched );
	j = 0;
	for ( auto qid : { 100, 101, 102, 103, 180, 190 } )
		ASSERT_EQ ( dResult.m_dQueryDesc[j++].m_iQUID, qid );
}
// full result, with everything
// Merge all three contexts with m_bVerbose: per-query DTs are merged in QUID
// order; doc list stays empty.
TEST_F ( PQ_merge, FullResultVerbose )
{
	CSphVector<PQMatchContextResult_t *> dSrc;
	dSrc.Add ( &dContexts[2] );
	dSrc.Add ( &dContexts[0] );
	dSrc.Add ( &dContexts[1] );
	dResult.m_bVerbose = true;
	PercolateMergeResults ( dSrc, dResult );

	// general numbers (sums over the three contexts)
	ASSERT_EQ ( dResult.m_iEarlyOutQueries, 7 );
	ASSERT_EQ ( dResult.m_iOnlyTerms, 2 );
	ASSERT_EQ ( dResult.m_iQueriesFailed, 1 );
	ASSERT_EQ ( dResult.m_iQueriesMatched, 6 );
	ASSERT_EQ ( dResult.m_iDocsMatched, 7 );

	// verbose: DTs from all contexts, ordered to match the QUID order
	ASSERT_TRUE ( dResult.m_bVerbose );
	ASSERT_EQ ( dResult.m_dQueryDT.GetLength (), dResult.m_iQueriesMatched );
	int j = 0;
	for ( auto dt : { 1000, 1001, 2002, 2003, 3008, 3009 } )
		ASSERT_EQ ( dResult.m_dQueryDT[j++], dt );

	// ensure that if no docs requested, nothing returned
	ASSERT_FALSE ( dResult.m_bGetDocs );
	ASSERT_TRUE ( dResult.m_dDocs.IsEmpty () );

	// matched descriptors, sorted by QUID
	ASSERT_EQ ( dResult.m_dQueryDesc.GetLength (), dResult.m_iQueriesMatched );
	j = 0;
	for ( auto qid : { 100, 101, 102, 103, 180, 190 } )
		ASSERT_EQ ( dResult.m_dQueryDesc[j++].m_iQUID, qid );
}
// full result, with everything
// Merge all three contexts with both m_bVerbose and m_bGetDocs: combines the
// expectations of FullResultWithDocs and FullResultVerbose.
TEST_F ( PQ_merge, FullResultWithEverything )
{
	CSphVector<PQMatchContextResult_t *> dSrc;
	dSrc.Add ( &dContexts[2] );
	dSrc.Add ( &dContexts[0] );
	dSrc.Add ( &dContexts[1] );
	dResult.m_bVerbose = true;
	dResult.m_bGetDocs = true;
	PercolateMergeResults ( dSrc, dResult );

	// general numbers (sums over the three contexts)
	ASSERT_EQ ( dResult.m_iEarlyOutQueries, 7 );
	ASSERT_EQ ( dResult.m_iOnlyTerms, 2 );
	ASSERT_EQ ( dResult.m_iQueriesFailed, 1 );
	ASSERT_EQ ( dResult.m_iQueriesMatched, 6 );
	ASSERT_EQ ( dResult.m_iDocsMatched, 7 );

	// verbose: DTs from all contexts, ordered to match the QUID order
	ASSERT_TRUE ( dResult.m_bVerbose );
	ASSERT_EQ ( dResult.m_dQueryDT.GetLength (), dResult.m_iQueriesMatched );
	int j = 0;
	for ( auto dt : { 1000, 1001, 2002, 2003, 3008, 3009 } ) ASSERT_EQ ( dResult.m_dQueryDT[j++], dt );

	// docs requested: concatenation of the three packed doc lists
	ASSERT_TRUE ( dResult.m_bGetDocs );
	ASSERT_EQ ( dResult.m_dDocs.GetLength (), 13 );
	j = 0;
	for ( auto i : { 2, 10, 11, 1, 12, 1, 21, 1, 22, 1, 31, 1, 32 } )
		ASSERT_EQ ( dResult.m_dDocs[j++], i );

	// matched descriptors, sorted by QUID
	ASSERT_EQ ( dResult.m_dQueryDesc.GetLength (), dResult.m_iQueriesMatched );
	j=0;
	for ( auto qid : { 100, 101, 102, 103, 180, 190 } )
		ASSERT_EQ ( dResult.m_dQueryDesc[j++].m_iQUID, qid );
}
| 11,113
|
C++
|
.cpp
| 287
| 36.498258
| 100
| 0.688614
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| true
| false
| false
| true
| false
| false
|
16,989
|
gtests_rtstuff.cpp
|
manticoresoftware_manticoresearch/src/gtests/gtests_rtstuff.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include <gtest/gtest.h>
#include "sphinxint.h"
#include "attribute.h"
#include "sphinxrt.h"
#include "sphinxsort.h"
#include "searchdaemon.h"
#include "binlog.h"
#include "accumulator.h"
#include <gmock/gmock.h>
//////////////////////////////////////////////////////////////////////////
static void DeleteIndexFiles ( const char * sIndex )
{
if ( !sIndex )
return;
const char * sExts[] = {
"kill", "lock", "meta", "ram", "0.spa", "0.spd", "0.spe", "0.sph", "0.spi", "0.spk", "0.spm", "0.spp" };
CSphString sName;
for (auto & sExt : sExts)
{
sName.SetSprintf ( "%s.%s", sIndex, sExt );
unlink ( sName.cstr () );
}
}
void TestRTInit ()
{
CSphConfigSection tRTConfig;
sphRTInit ( "" );
Binlog::Configure ( tRTConfig, 0 );
SmallStringHash_T<CSphIndex *> hIndexes;
Binlog::Replay ( hIndexes );
}
#define RT_INDEX_FILE_NAME "test_temp"
// Mock document source feeding a fixed table of iDocs x iFields C strings
// (ppDocs laid out row-major) into the indexer. Connect/Disconnect and the
// MVA/kill-list iterators are gmock stubs; document ids are assigned as
// iDoc+1 in NextDocument().
class MockTestDoc_c : public CSphSource
{
public:
	explicit MockTestDoc_c ( const CSphSchema &tSchema, BYTE ** ppDocs, int iDocs, int iFields )
		: CSphSource ( "test_doc" )
	{
		m_tSchema = tSchema;
		m_ppDocs = ppDocs;
		m_iDocCount = iDocs;
		m_iFields = iFields;
		m_dFieldLengths.Resize ( m_iFields );
		m_dFields.Reserve ( iFields );
	}

	// Advance to the next row of m_ppDocs; sets bEOF once the table is
	// exhausted and returns nullptr. Also refreshes m_dFieldLengths and
	// stores docid = iDoc+1 into the document's id attribute.
	BYTE ** NextDocument ( bool & bEOF, CSphString & ) override
	{
		bEOF = false;
		int iDoc = (int)++m_iDocsCounter;
		if ( iDoc>=m_iDocCount )
		{
			bEOF = true;
			return nullptr;
		}

		// measure each field of the current row
		for ( int i = 0; i<m_iFields; i++ )
		{
			char * szField = ( char * ) ( m_ppDocs + iDoc * m_iFields )[i];
			m_dFieldLengths[i] = (int) strlen ( szField );
		}

		const CSphColumnInfo * pId = m_tSchema.GetAttr ( sphGetDocidName() );
		assert ( pId );
		m_tDocInfo.SetAttr( pId->m_tLocator, iDoc+1 );
		return m_ppDocs + iDoc * m_iFields;
	}

	// gmock stubs; expected behavior of the real methods noted inline
	MOCK_CONST_METHOD0( GetFieldLengths, const int *() ); // return m_dFieldLengths.Begin();
	MOCK_METHOD1 ( Connect, bool ( CSphString & ) ); // return true;
	MOCK_METHOD0 ( Disconnect, void() );

	// Reset iteration state; m_iDocsCounter starts at -1 so the first
	// NextDocument() pre-increments it to row 0.
	bool IterateStart ( CSphString & ) final
	{
		m_tDocInfo.Reset ( m_tSchema.GetRowSize () );
		m_iPlainFieldsLength = m_tSchema.GetFieldsCount();
		m_iDocsCounter = -1;
		return true;
	}

	MOCK_METHOD2 ( IterateMultivaluedStart, bool ( int, CSphString& )); // return false;
	MOCK_METHOD2 ( IterateMultivaluedNext, bool(int64_t &, int64_t &)); // return false;
	MOCK_METHOD1 ( IterateKillListStart, bool (CSphString & ) ); // return false;
	MOCK_METHOD1 ( IterateKillListNext, bool (DocID_t & ) ) ; // return false

	int GetFieldCount () const { return m_iFields; }

	// Views over the CURRENT row's fields (valid until the next iteration)
	CSphVector<VecTraits_T<const char>> GetFields ()
	{
		m_dFields.Resize(0);
		for ( int i=0; i<m_iFields; ++i)
		{
			auto pStr = (const char*) m_ppDocs[m_iDocsCounter*m_iFields + i];
			m_dFields.Add ( VecTraits_T<const char> (pStr,strlen(pStr)));
		}
		return m_dFields;
	}

	int m_iDocsCounter;		// current row index, -1 before first NextDocument()
	int m_iDocCount;		// total rows in m_ppDocs
	int m_iFields;			// fields per row
	BYTE ** m_ppDocs;		// row-major table of field strings (not owned)
	CSphVector<VecTraits_T<const char> > m_dFields;
	CSphVector<int> m_dFieldLengths;
};
// Mock source that fabricates ~800 pseudo-random two-field documents on the
// fly ("cat title…" / "dog contentwashere…") for stress-style RT tests.
// Attribute 0 gets rowid+1000, attribute 1 a constant 1313.
class MockDocRandomizer_c : public CSphSource
{
public:
	static const int m_iMaxFields = 2;		// fixed field count
	static const int m_iMaxFieldLen = 512;	// per-field buffer size
	char m_dFields[m_iMaxFields][m_iMaxFieldLen];
	char * m_ppFields[m_iMaxFields];		// pointers into m_dFields rows
	CSphVector<VecTraits_T<const char> > m_dMeasuredFields;
	int m_dFieldLengths[m_iMaxFields];
	int m_iDocsCounter;

	explicit MockDocRandomizer_c ( const CSphSchema & tSchema ) : CSphSource ( "test_doc" )
	{
		m_tSchema = tSchema;
		m_dMeasuredFields.Reserve(m_iMaxFields);
		for ( int i=0; i<m_iMaxFields; ++i )
			m_ppFields[i] = (char *)&m_dFields[i];
	}

	// Generate the next synthetic document; EOF after counter exceeds 800.
	BYTE ** NextDocument ( bool & bEOF, CSphString & ) override
	{
		bEOF = false;
		if ( m_iDocsCounter>800 )
		{
			bEOF = true;
			return nullptr;
		}
		++m_tDocInfo.m_tRowID;
		++m_iDocsCounter;

		// attr0 = rowid+1000 (unique), attr1 = constant 1313
		m_tDocInfo.SetAttr ( m_tSchema.GetAttr(0).m_tLocator, m_tDocInfo.m_tRowID+1000 );
		m_tDocInfo.SetAttr ( m_tSchema.GetAttr(1).m_tLocator, 1313 );

		// fill both fields with fresh pseudo-random words
		snprintf ( m_dFields[0], m_iMaxFieldLen, "cat title%d title%d title%d title%d title%d"
				, sphRand(), sphRand(), sphRand(), sphRand(), sphRand() );
		snprintf ( m_dFields[1], m_iMaxFieldLen, "dog contentwashere%d contentwashere%d contentwashere%d contentwashere%d contentwashere%d"
				, sphRand(), sphRand(), sphRand(), sphRand(), sphRand() );

		for ( int i=0; i < m_iMaxFields; ++i )
			m_dFieldLengths[i] = (int) strlen ( m_ppFields[i] );

		return (BYTE**) &m_ppFields[0];
	}

	// gmock stubs; expected behavior of the real methods noted inline
	MOCK_CONST_METHOD0( GetFieldLengths, const int *() ); // return m_dFieldLengths.Begin();
	MOCK_METHOD1 ( Connect, bool ( CSphString & ) ); // return true;
	MOCK_METHOD0 ( Disconnect, void () );

	// Reset iteration state before a fresh pass
	bool IterateStart ( CSphString & ) final
	{
		m_tDocInfo.Reset ( m_tSchema.GetRowSize () );
		m_iDocsCounter = 0;
		m_iPlainFieldsLength = m_tSchema.GetFieldsCount();
		return true;
	}

	MOCK_METHOD2 ( IterateMultivaluedStart, bool ( int, CSphString & ) ); // return false;
	MOCK_METHOD2 ( IterateMultivaluedNext, bool (int64_t &, int64_t &) ); // return false;
	MOCK_METHOD1 ( IterateKillListStart, bool (CSphString & ) ); // return false;
	MOCK_METHOD1 ( IterateKillListNext, bool (DocID_t & ) ); // return false

	int GetFieldCount () const { return m_iMaxFields; }

	// Views over the CURRENT document's generated fields
	CSphVector<VecTraits_T<const char>> GetFields ()
	{
		m_dMeasuredFields.Resize ( 0 );
		for ( const char * pStr : m_ppFields )
			m_dMeasuredFields.Add ( VecTraits_T<const char> ( pStr, strlen ( pStr ) ) );
		return m_dMeasuredFields;
	}
};
//////////////////////////////////////////////////////////////////////////
// Fixture for RT index tests: wipes leftover index files, (re)initializes the
// RT/binlog subsystems, creates a UTF-8 tokenizer and a two-field source
// schema ("title", "content"). TearDown deinits binlog and cleans files again.
class RT : public ::testing::Test
{
protected:
	void SetUp() override
	{
		DeleteIndexFiles ( RT_INDEX_FILE_NAME );
		TestRTInit();

		tDictSettings.m_bWordDict = false;	// CRC dict, not keyword dict

		pTok = Tokenizer::Detail::CreateUTF8Tokenizer ();

		tSrcSchema.Reset ();
		tSrcSchema.AddField ( "title" );
		tSrcSchema.AddField ( "content" );
	}

	void TearDown() override
	{
		Binlog::Deinit ();
		DeleteIndexFiles ( RT_INDEX_FILE_NAME );
	}

	CSphColumnInfo tCol;		// scratch attr descriptor used by the tests
	CSphSchema tSrcSchema;		// fields ("title","content") + per-test attrs
	CSphString sError, sWarning;
	TokenizerRefPtr_c pTok;
	CSphDictSettings tDictSettings;
};
/*
* It was instantiated several times, but that wasn't work, since on every instantiation couple of attributes was inserted into schema, having idex's schema the same.
*/
// End-to-end RT smoke test: build a one-document RT index from the mock
// source, run "@title cat" through a sorter, and check that exactly one match
// comes back with rowID 0 and weight 1500. Runs inside a coroutine because
// the RT index code expects a coroutine context.
TEST_F ( RT, WeightBoundary )
{
	DWORD uParam = 1500;	// expected match weight — TODO confirm where 1500 originates in ranking
	using namespace testing;
	Threads::CallCoroutine ( [&] {

	DictRefPtr_c pDict { sphCreateDictionaryCRC ( tDictSettings, nullptr, pTok, "weight", false, 32, nullptr, sError ) };

	// extend the source schema with id/channel_id attrs (dynamic)
	tCol.m_sName = "id";
	tCol.m_eAttrType = SPH_ATTR_BIGINT;
	tSrcSchema.AddAttr ( tCol, true );

	tCol.m_sName = "channel_id";
	tCol.m_eAttrType = SPH_ATTR_INTEGER;
	tSrcSchema.AddAttr ( tCol, true );

	// one document, two fields; gmock expectations pin the call counts
	const char * dFields[] = { "If I were a cat...", "We are the greatest cat" };
	auto * pSrc = new MockTestDoc_c ( tSrcSchema, ( BYTE ** ) dFields, 1, 2 );
	EXPECT_CALL ( *pSrc, Connect ( _ ) ).WillOnce ( Return ( true ) );
	EXPECT_CALL ( *pSrc, GetFieldLengths () ).WillOnce ( Return ( pSrc->m_dFieldLengths.Begin () ) );
	EXPECT_CALL ( *pSrc, Disconnect () );

	pSrc->SetTokenizer ( pTok );
	pSrc->SetDict ( pDict );
	pSrc->Setup ( CSphSourceSettings(), nullptr );
	EXPECT_TRUE ( pSrc->Connect ( sError ) );
	EXPECT_TRUE ( pSrc->IterateStart ( sError ) );
	EXPECT_TRUE ( pSrc->UpdateSchema ( &tSrcSchema, sError ) );

	CSphSchema tSchema; // source schema must be all dynamic attrs; but index ones must be static
	for ( int i=0; i<tSrcSchema.GetFieldsCount(); i++ )
		tSchema.AddField ( tSrcSchema.GetField(i) );

	for ( int i=0; i<tSrcSchema.GetAttrsCount(); i++ )
		tSchema.AddAttr ( tSrcSchema.GetAttr(i), false );

	auto pIndex = sphCreateIndexRT ( "testrt", RT_INDEX_FILE_NAME, tSchema, 32 * 1024 * 1024, false );

	// tricky bit
	// index owns its tokenizer/dict pair, and MAY do whatever it wants
	// and starting with meta v4, it WILL deallocate tokenizer/dict in Prealloc()
	// in favor of tokenizer/dict loaded from the saved settings in meta
	// however, source still needs those guys!
	// so for simplicity i just clone them
	pIndex->SetTokenizer ( pTok->Clone ( SPH_CLONE_INDEX ) );
	pIndex->SetDictionary ( pDict->Clone () );
	pIndex->PostSetup ();
	StrVec_t dWarnings;
	EXPECT_TRUE ( pIndex->Prealloc ( false, nullptr, dWarnings ) );

	// pump every document from the source into the index, committing each
	InsertDocData_c tDoc ( pIndex->GetMatchSchema() );
	int iDynamic = pIndex->GetMatchSchema().GetRowSize();
	RtAccum_t tAcc;
	CSphString sFilter;
	bool bEOF = false;
	while (true)
	{
		EXPECT_TRUE ( pSrc->IterateDocument ( bEOF, sError ) );
		if ( bEOF )
			break;

		tDoc.m_dFields = pSrc->GetFields();
		tDoc.m_tDoc.Combine ( pSrc->m_tDocInfo, iDynamic );
		pIndex->AddDocument ( tDoc, false, sFilter, sError, sWarning, &tAcc );
		pIndex->Commit ( nullptr, &tAcc );
	}

	pSrc->Disconnect ();

	ASSERT_EQ ( pSrc->GetStats ().m_iTotalDocuments, 1) << "docs committed";

	// query the index: field-limited match on "cat" in @title
	CSphQuery tQuery;
	AggrResult_t tResult;
	CSphQueryResult tQueryResult;
	tQueryResult.m_pMeta = &tResult;
	CSphMultiQueryArgs tArgs ( 1 );
	tQuery.m_sQuery = "@title cat";
	auto pParser = sphCreatePlainQueryParser();
	tQuery.m_pQueryParser = pParser.get();

	SphQueueSettings_t tQueueSettings ( pIndex->GetMatchSchema () );
	SphQueueRes_t tRes;
	ISphMatchSorter * pSorter = sphCreateQueue ( tQueueSettings, tQuery, tResult.m_sError, tRes );
	ASSERT_TRUE ( pSorter );
	ASSERT_TRUE ( pIndex->MultiQuery ( tQueryResult, tQuery, { &pSorter, 1 }, tArgs ) );
	auto & tOneRes = tResult.m_dResults.Add ();
	tOneRes.FillFromSorter ( pSorter );

	// exactly one hit, the first row, with the expected weight
	ASSERT_EQ ( tResult.GetLength (), 1 ) << "results found";
	ASSERT_EQ ( tOneRes.m_dMatches[0].m_tRowID, 0 ) << "rowID" ;
	ASSERT_EQ ( tOneRes.m_dMatches[0].m_iWeight, uParam) << "weight" ;

	SafeDelete ( pSorter );
	SafeDelete ( pSrc );
	});
}
TEST_F ( RT, RankerFactors )
{
using namespace testing;
Threads::CallCoroutine ( [&] {
const char * dFields[] = {
"Seven lies multiplied by seven", "", "Multiplied by seven again", "", "Seven lies multiplied by seven"
, "Multiplied by seven again", "Mary vs Lamb", "Mary had a little lamb little lamb little lamb"
, "Mary vs Lamb 2: Return of The Lamb", "...whose fleece was white as snow", "Mary vs Lamb 3: The Resurrection"
, "Snow! Bloody snow!", "the who", "what the foo"
};
const char * dQueries[] = {
"seven !(angels !by)", // matched by 0-2
"Mary lamb", // matched by 3-5
"(the who) | (the foo)", // matched by 6
};
tCol.m_sName = "id";
tCol.m_eAttrType = SPH_ATTR_BIGINT;
tSrcSchema.AddAttr ( tCol, true );
tCol.m_sName = "idd";
tCol.m_eAttrType = SPH_ATTR_INTEGER;
tSrcSchema.AddAttr ( tCol, true );
auto pDict = sphCreateDictionaryCRC ( tDictSettings, NULL, pTok, "rt", false, 32, nullptr, sError );
auto pSrc = new MockTestDoc_c ( tSrcSchema, ( BYTE ** ) dFields, sizeof ( dFields ) / sizeof ( dFields[0] ) / 2
, 2 );
EXPECT_CALL ( *pSrc, Connect ( _ ) ).WillOnce ( Return ( true ) );
EXPECT_CALL ( *pSrc, GetFieldLengths () ).Times ( 7 ).WillRepeatedly ( Return ( pSrc->m_dFieldLengths.Begin () ) );
EXPECT_CALL ( *pSrc, Disconnect () );
pSrc->SetTokenizer ( pTok );
pSrc->SetDict ( pDict );
pSrc->Setup ( CSphSourceSettings(), nullptr );
ASSERT_TRUE ( pSrc->Connect ( sError ) );
ASSERT_TRUE ( pSrc->IterateStart ( sError ) );
ASSERT_TRUE ( pSrc->UpdateSchema ( &tSrcSchema, sError ) );
CSphSchema tSchema; // source schema must be all dynamic attrs; but index ones must be static
for ( int i=0; i<tSrcSchema.GetFieldsCount(); i++ )
tSchema.AddField ( tSrcSchema.GetField(i) );
for ( int i=0; i<tSrcSchema.GetAttrsCount(); i++ )
tSchema.AddAttr ( tSrcSchema.GetAttr(i), false );
auto pIndex = sphCreateIndexRT ( "testrt", RT_INDEX_FILE_NAME, tSchema, 128 * 1024, false );
pIndex->SetTokenizer ( pTok ); // index will own this pair from now on
pIndex->SetDictionary ( sphCreateDictionaryCRC ( tDictSettings, nullptr, pTok, "rt", false, 32, nullptr, sError ) );
pIndex->PostSetup ();
StrVec_t dWarnings;
Verify ( pIndex->Prealloc ( false, nullptr, dWarnings ) );
CSphString sFilter;
InsertDocData_c tDoc ( pIndex->GetMatchSchema() );
int iDynamic = pIndex->GetMatchSchema().GetRowSize();
RtAccum_t tAcc;
bool bEOF = false;
while (true)
{
Verify ( pSrc->IterateDocument ( bEOF, sError ) );
if ( bEOF )
break;
tDoc.m_dFields = pSrc->GetFields();
tDoc.m_tDoc.Combine ( pSrc->m_tDocInfo, iDynamic );
pIndex->AddDocument ( tDoc, false, sFilter, sError, sWarning, &tAcc );
}
pIndex->Commit ( nullptr, &tAcc );
pSrc->Disconnect ();
CSphQuery tQuery;
CSphQueryItem &tFactor = tQuery.m_dItems.Add ();
tFactor.m_sExpr = "packedfactors()";
tFactor.m_sAlias = "pf";
tQuery.m_sRankerExpr = "1";
tQuery.m_eRanker = SPH_RANK_EXPR;
tQuery.m_eMode = SPH_MATCH_EXTENDED2;
tQuery.m_eSort = SPH_SORT_EXTENDED;
tQuery.m_sSortBy = "@weight desc";
tQuery.m_sOrderBy = "@weight desc";
auto pParser = sphCreatePlainQueryParser();
tQuery.m_pQueryParser = pParser.get();
AggrResult_t tResult;
CSphQueryResult tQueryResult;
tQueryResult.m_pMeta = &tResult;
CSphMultiQueryArgs tArgs ( 1 );
tArgs.m_uPackedFactorFlags = SPH_FACTOR_ENABLE | SPH_FACTOR_CALC_ATC;
SphQueueSettings_t tQueueSettings ( pIndex->GetMatchSchema () );
tQueueSettings.m_bComputeItems = true;
SphQueueRes_t tRes;
for ( auto szQuery : dQueries )
{
tQuery.m_sQuery = szQuery;
auto pSorter = sphCreateQueue ( tQueueSettings, tQuery, tResult.m_sError, tRes );
ASSERT_TRUE ( pSorter );
ASSERT_TRUE ( pIndex->MultiQuery ( tQueryResult, tQuery, { &pSorter, 1 }, tArgs ) );
auto & tOneRes = tResult.m_dResults.Add ();
tOneRes.FillFromSorter ( pSorter );
tResult.m_tSchema = *pSorter->GetSchema ();
const CSphAttrLocator &tLoc = tResult.m_tSchema.GetAttr ( "pf" )->m_tLocator;
for ( int iMatch = 0; iMatch<tOneRes.m_dMatches.GetLength (); ++iMatch )
{
const BYTE * pAttr = (const BYTE *) tOneRes.m_dMatches[iMatch].GetAttr ( tLoc );
ASSERT_TRUE ( pAttr );
auto * pFactors = (const unsigned int *) sphUnpackPtrAttr ( pAttr ).first;
SPH_UDF_FACTORS tUnpacked;
sphinx_factors_init ( &tUnpacked );
sphinx_factors_unpack ( pFactors, &tUnpacked );
// doc level factors
ASSERT_EQ ( tUnpacked.doc_bm25, sphinx_get_doc_factor_int ( pFactors, SPH_DOCF_BM25 ) );
ASSERT_EQ ( tUnpacked.doc_bm25a, sphinx_get_doc_factor_float ( pFactors, SPH_DOCF_BM25A ) );
ASSERT_EQ ( tUnpacked.matched_fields, sphinx_get_doc_factor_int ( pFactors, SPH_DOCF_MATCHED_FIELDS ) );
ASSERT_EQ ( tUnpacked.doc_word_count, sphinx_get_doc_factor_int ( pFactors, SPH_DOCF_DOC_WORD_COUNT ) );
ASSERT_EQ ( tUnpacked.num_fields, sphinx_get_doc_factor_int ( pFactors, SPH_DOCF_NUM_FIELDS ) );
ASSERT_EQ ( tUnpacked.max_uniq_qpos, sphinx_get_doc_factor_int ( pFactors, SPH_DOCF_MAX_UNIQ_QPOS ) );
// field level factors
for ( int iField = 0; iField<tUnpacked.num_fields; ++iField )
{
if ( !tUnpacked.field[iField].hit_count )
continue;
const unsigned int * pField = sphinx_get_field_factors ( pFactors, iField );
ASSERT_TRUE ( pField );
ASSERT_EQ ( tUnpacked.field[iField].hit_count, sphinx_get_field_factor_int ( pField
, SPH_FIELDF_HIT_COUNT ) );
ASSERT_EQ ( tUnpacked.field[iField].lcs, sphinx_get_field_factor_int ( pField, SPH_FIELDF_LCS ) );
ASSERT_EQ ( tUnpacked.field[iField].word_count, sphinx_get_field_factor_int ( pField
, SPH_FIELDF_WORD_COUNT ) );
ASSERT_EQ ( tUnpacked.field[iField].tf_idf, sphinx_get_field_factor_float ( pField
, SPH_FIELDF_TF_IDF ) );
ASSERT_EQ ( tUnpacked.field[iField].min_idf, sphinx_get_field_factor_float ( pField
, SPH_FIELDF_MIN_IDF ) );
ASSERT_EQ (
tUnpacked.field[iField].max_idf, sphinx_get_field_factor_float ( pField, SPH_FIELDF_MAX_IDF ) );
ASSERT_EQ (
tUnpacked.field[iField].sum_idf, sphinx_get_field_factor_float ( pField, SPH_FIELDF_SUM_IDF ) );
ASSERT_EQ ( tUnpacked.field[iField].min_hit_pos, sphinx_get_field_factor_int ( pField
, SPH_FIELDF_MIN_HIT_POS ) );
ASSERT_EQ ( tUnpacked.field[iField].min_best_span_pos, sphinx_get_field_factor_int ( pField
, SPH_FIELDF_MIN_BEST_SPAN_POS ) );
ASSERT_EQ ( tUnpacked.field[iField].max_window_hits, sphinx_get_field_factor_int ( pField
, SPH_FIELDF_MAX_WINDOW_HITS ) );
ASSERT_EQ (
tUnpacked.field[iField].min_gaps, sphinx_get_field_factor_int ( pField, SPH_FIELDF_MIN_GAPS ) );
ASSERT_EQ ( tUnpacked.field[iField].atc, sphinx_get_field_factor_float ( pField, SPH_FIELDF_ATC ) );
ASSERT_EQ ( tUnpacked.field[iField].lccs, sphinx_get_field_factor_int ( pField, SPH_FIELDF_LCCS ) );
ASSERT_EQ ( tUnpacked.field[iField].wlccs, sphinx_get_field_factor_float ( pField, SPH_FIELDF_WLCCS ) );
bool bExactHitSame = ( ( ( tUnpacked.field[iField].exact_hit << iField )
& sphinx_get_doc_factor_int ( pFactors, SPH_DOCF_EXACT_HIT_MASK ) )!=0 );
ASSERT_TRUE ( tUnpacked.field[iField].exact_hit==0 || bExactHitSame );
bool bExactOrderSame = ( ( ( tUnpacked.field[iField].exact_order << iField )
& sphinx_get_doc_factor_int ( pFactors, SPH_DOCF_EXACT_ORDER_MASK ) )!=0 );
ASSERT_TRUE ( tUnpacked.field[iField].exact_order==0 || bExactOrderSame );
}
// term level factors
for ( int iWord = 0; iWord<tUnpacked.max_uniq_qpos; iWord++ )
{
if ( !tUnpacked.term[iWord].keyword_mask )
continue;
const unsigned int * pTerm = sphinx_get_term_factors ( pFactors, iWord + 1 );
ASSERT_TRUE ( pTerm );
ASSERT_EQ ( tUnpacked.term[iWord].tf, sphinx_get_term_factor_int ( pTerm, SPH_TERMF_TF ) );
ASSERT_EQ ( tUnpacked.term[iWord].idf, sphinx_get_term_factor_float ( pTerm, SPH_TERMF_IDF ) );
}
sphinx_factors_deinit ( &tUnpacked );
SafeDelete ( pSorter );
}
}
SafeDelete ( pSrc );
pTok = nullptr; // owned and deleted by index
});
}
// Regression test: run a search and drain the sorter in the middle of an
// indexing session ("send"), then keep adding documents and commit again
// ("merge"). Verifies the result set collected mid-stream stays intact.
TEST_F ( RT, SendVsMerge )
{
using namespace testing;
Threads::CallCoroutine ( [&] {
auto pDict = sphCreateDictionaryCRC ( tDictSettings, NULL, pTok, "rt", false, 32, nullptr, sError );
// source schema: bigint id plus two int tags, all added as dynamic attrs
tCol.m_sName = "id";
tCol.m_eAttrType = SPH_ATTR_BIGINT;
tSrcSchema.AddAttr ( tCol, true );
tCol.m_sName = "tag1";
tCol.m_eAttrType = SPH_ATTR_INTEGER;
tSrcSchema.AddAttr ( tCol, true );
tCol.m_sName = "tag2";
tCol.m_eAttrType = SPH_ATTR_INTEGER;
tSrcSchema.AddAttr ( tCol, true );
// mocked source generates documents; Times(801) pins the exact number of
// iterations (800 docs + final EOF pass, presumably - confirm against
// MockDocRandomizer_c)
auto pSrc = new MockDocRandomizer_c ( tSrcSchema );
EXPECT_CALL ( *pSrc, Connect ( _ ) ).WillOnce ( Return ( true ) );
EXPECT_CALL ( *pSrc, GetFieldLengths () ).Times ( 801 ).WillRepeatedly ( Return ( pSrc->m_dFieldLengths ) );
EXPECT_CALL ( *pSrc, Disconnect () );
pSrc->SetTokenizer ( pTok );
pSrc->SetDict ( pDict );
pSrc->Setup ( CSphSourceSettings(), nullptr );
ASSERT_TRUE ( pSrc->Connect ( sError ) );
ASSERT_TRUE ( pSrc->IterateStart ( sError ) );
ASSERT_TRUE ( pSrc->UpdateSchema ( &tSrcSchema, sError ) );
CSphSchema tSchema; // source schema must be all dynamic attrs; but index ones must be static
for ( int i=0; i<tSrcSchema.GetFieldsCount(); i++ )
tSchema.AddField ( tSrcSchema.GetField(i) );
for ( int i=0; i<tSrcSchema.GetAttrsCount(); i++ )
tSchema.AddAttr ( tSrcSchema.GetAttr(i), false );
auto pIndex = sphCreateIndexRT ( "testrt", RT_INDEX_FILE_NAME, tSchema, 128 * 1024, false );
pIndex->SetTokenizer ( pTok ); // index will own this pair from now on
pIndex->SetDictionary ( pDict );
pIndex->PostSetup ();
StrVec_t dWarnings;
ASSERT_TRUE ( pIndex->Prealloc ( false, nullptr, dWarnings ) );
// fulltext query with a select-all item list
CSphQuery tQuery;
AggrResult_t tResult;
CSphQueryResult tQueryResult;
tQueryResult.m_pMeta = &tResult;
CSphMultiQueryArgs tArgs ( 1 );
tQuery.m_sQuery = "@title cat";
auto pParser = sphCreatePlainQueryParser();
tQuery.m_pQueryParser = pParser.get();
CSphQueryItem & tItem = tQuery.m_dItems.Add ();
tItem.m_sExpr = "*";
tItem.m_sAlias = "*";
tQuery.m_sSelect = "*";
SphQueueSettings_t tQueueSettings ( pIndex->GetMatchSchema () );
tQueueSettings.m_bComputeItems = true;
SphQueueRes_t tRes;
auto pSorter = sphCreateQueue ( tQueueSettings, tQuery, tResult.m_sError, tRes );
ASSERT_TRUE ( pSorter );
CSphString sFilter;
InsertDocData_c tDoc ( pIndex->GetMatchSchema() );
int iDynamic = pIndex->GetMatchSchema().GetRowSize();
RtAccum_t tAcc;
bool bEOF = false;
while (true)
{
ASSERT_TRUE ( pSrc->IterateDocument ( bEOF, sError ) );
if ( bEOF )
break;
tDoc.m_dFields = pSrc->GetFields();
tDoc.m_tDoc.Combine ( pSrc->m_tDocInfo, iDynamic );
pIndex->AddDocument ( tDoc, false, sFilter, sError, sWarning, &tAcc );
sError = ""; // need to reset error message
// halfway through (doc 350): commit what we have, run the query, and
// drain the sorter into the result set; then continue indexing
if ( pSrc->m_iDocsCounter==350 )
{
pIndex->Commit ( NULL, &tAcc );
EXPECT_TRUE ( pIndex->MultiQuery ( tQueryResult, tQuery, { &pSorter, 1 }, tArgs ) );
auto & tOneRes = tResult.m_dResults.Add ();
tOneRes.FillFromSorter ( pSorter );
}
}
// commit the remaining docs; must not disturb the already-drained results
pIndex->Commit ( NULL, &tAcc );
pSrc->Disconnect ();
tResult.m_tSchema = *pSorter->GetSchema ();
auto & tOneRes = tResult.m_dResults.First ();
ASSERT_EQ ( tResult.GetLength (), 20 );
for ( int i = 0; i<tResult.GetLength (); ++i )
{
const RowID_t uID = tOneRes.m_dMatches[i].m_tRowID;
const SphAttr_t tTag1 = tOneRes.m_dMatches[i].GetAttr ( tResult.m_tSchema.GetAttr ( 0 ).m_tLocator );
const SphAttr_t tTag2 = tOneRes.m_dMatches[i].GetAttr ( tResult.m_tSchema.GetAttr ( 1 ).m_tLocator );
// expected values come from the mock source: tag1 = rowid + 1000 and a
// constant tag2 = 1313 (presumably set by MockDocRandomizer_c - verify there)
ASSERT_TRUE ( ( RowID_t ) tTag1==uID + 1000 );
ASSERT_TRUE ( tTag2==1313 );
}
SafeDelete ( pSorter );
SafeDelete ( pSrc );
pTok = nullptr; // owned and deleted by index
});
}
| 21,895
|
C++
|
.cpp
| 527
| 38.487666
| 166
| 0.688168
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,990
|
gtests_json.cpp
|
manticoresoftware_manticoresearch/src/gtests/gtests_json.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include <gtest/gtest.h>
#include "json/cJSON.h"
#include "sphinxjson.h"
#include "sphinxjsonquery.h"
// Miscelaneous short tests for json/cjson
//////////////////////////////////////////////////////////////////////////
// Round-trip smoke test for the bundled cJSON: build {"indexes":[{name,path},...]},
// print it to text, parse the text back and verify every field survived.
TEST ( CJson, basics )
{
sphInitCJson();
struct MyIndex_t
{
CSphString m_sName;
CSphString m_sPath;
};
CSphString sResult;
CSphVector<MyIndex_t> dIndexes;
dIndexes.Add ( { "test1", "test1_path" } );
dIndexes.Add ( { "test2", "test2_path" } );
dIndexes.Add ( { "test3", "test3_path" } );
// serialize: every index becomes an object inside the "indexes" array
{
cJSON * pRoot = cJSON_CreateObject ();
ASSERT_TRUE ( pRoot );
cJSON * pIndexes = cJSON_CreateArray ();
ASSERT_TRUE ( pIndexes );
cJSON_AddItemToObject ( pRoot, "indexes", pIndexes );
for ( auto i : dIndexes )
{
cJSON * pIndex = cJSON_CreateObject ();
ASSERT_TRUE ( pIndex );
cJSON_AddItemToArray ( pIndexes, pIndex );
cJSON_AddStringToObject ( pIndex, "name", i.m_sName.cstr () );
cJSON_AddStringToObject ( pIndex, "path", i.m_sPath.cstr () );
}
// Adopt takes ownership of the printed buffer; no manual free needed
char * szResult = cJSON_Print ( pRoot );
sResult.Adopt ( &szResult );
cJSON_Delete ( pRoot );
}
// parse the printed text back and compare field-by-field
{
const char * dContents = sResult.cstr ();
cJSON * pRoot = cJSON_Parse ( dContents );
EXPECT_TRUE ( pRoot );
cJSON * pIndexes = cJSON_GetObjectItem ( pRoot, "indexes" );
EXPECT_TRUE ( pIndexes );
int iNumIndexes = cJSON_GetArraySize ( pIndexes );
ASSERT_EQ ( iNumIndexes, dIndexes.GetLength () );
int iItem = 0;
for ( auto i : dIndexes )
{
cJSON * pIndex = cJSON_GetArrayItem ( pIndexes, iItem++ );
EXPECT_TRUE ( pIndex );
cJSON * pJ;
pJ = cJSON_GetObjectItem ( pIndex, "name" );
EXPECT_TRUE ( pJ );
ASSERT_EQ ( i.m_sName, pJ->valuestring );
pJ = cJSON_GetObjectItem ( pIndex, "path" );
EXPECT_TRUE ( pJ );
ASSERT_EQ ( i.m_sPath, pJ->valuestring );
}
cJSON_Delete ( pRoot );
}
}
// Compare escaping of special characters between cJSON's printer and the
// project's JsonEscapedBuilder (output only printed, not asserted).
TEST ( CJson, format )
{
sphInitCJson();
cJSON * pJson = cJSON_CreateObject ();
cJSON_AddStringToObject ( pJson, "escaped", " \" quote \\ slash \b b \f feed \n n \r r \t tab \005 / here " );
char * szResult = cJSON_PrintUnformatted ( pJson );
CSphString sResult ( szResult );
printf ( "\n%s\n", szResult );
// NOTE(review): SafeDeleteArray uses delete[]; this is only correct if
// sphInitCJson installed matching allocation hooks - confirm in sphinxjson.cpp
SafeDeleteArray ( szResult );
JsonEscapedBuilder tBuild;
tBuild.StartBlock (":", "{", "}");
tBuild.AppendString ("escaped", '\"');
tBuild.AppendEscaped ( " \" quote \\ slash \b b \f feed \n n \r r \t tab \005 / here ", EscBld::eEscape );
tBuild.FinishBlocks ();
printf ( "\n%s\n", tBuild.cstr() );
cJSON_Delete ( pJson );
}
// Unescape sSource into pTarget (caller-provided, large enough buffer) and
// NUL-terminate the result. Returns the unescaped length; 0 for an empty
// source. The unescaped text is never longer than the input.
int JsonStrUnescape ( char* pTarget, const CSphString& sSource )
{
	if ( sSource.IsEmpty() )
		return 0;

	const int iLen = JsonUnescape ( pTarget, sSource.cstr(), sSource.Length() );
	assert ( iLen<=sSource.Length() );
	pTarget[iLen] = '\0';
	return iLen;
}
namespace {

// Unescape 'src' into a local buffer and verify the result equals 'target'.
void te ( const char* src, const char* target )
{
	char dBuf[100];
	const int iLen = JsonUnescape ( dBuf, src, (int)strlen(src) );
	assert ( iLen<=(int)strlen(src) );
	dBuf[iLen] = '\0';
	ASSERT_STREQ ( target, dBuf );
}

} // namespace
// Exhaustive checks of JsonUnescape: quote stripping, the escape whitelist,
// \uXXXX handling (incl. surrogate pairs and malformed sequences), and a
// regression for a trailing backslash at the buffer edge.
TEST (integrity, JsonUnescape)
{
char buf[100];
// autoremove heading and trailing quotes "
JsonStrUnescape(buf,"\"Hello world\"");
ASSERT_STREQ (buf,"Hello world");
// autoremove heading and trailing quotes '
JsonStrUnescape ( buf, "'Hello world'" );
ASSERT_STREQ ( buf, "Hello world" );
// cases of escaped syms
te ( R"(_\b_)", "_\b_" );
te ( R"(_\n_)", "_\n_" );
te ( R"(_\r_)", "_\r_" );
te ( R"(_\t_)", "_\t_" );
te ( R"(_\f_)", "_\f_" );
// assert nothing apart above is unescaped
// tst = "_\<c>_" must unescape to "_<c>_" (backslash dropped, char kept)
char tst[10]; tst[0]=tst[3]='_'; tst[1]='\\'; tst[4] = '\0';
char dst[10]; dst[0]=dst[2]='_'; dst[3] = '\0';
for (unsigned char c='a';c<255;++c)
{
if (c!='b'&&c!='n'&&c!='r'&&c!='t'&&c!='f')
{
tst[2] = c;
dst[1] = c;
te ( tst, dst );
}
}
JsonStrUnescape(buf,R"(\n\r\b)");
ASSERT_STREQ ( buf, "\n\r\b" );
// valid \uXXXX decodes to the code point
JsonStrUnescape ( buf, R"(\u000Aabc)" );
ASSERT_STREQ ( buf, "\nabc" );
// malformed \u sequence: backslash dropped, rest kept verbatim
JsonStrUnescape ( buf, R"(\u001xbc)" );
ASSERT_STREQ ( buf, "u001xbc" );
// valid surrogate pair: 4-byte UTF-8 sequence + "abc" = 7 bytes
auto iRes = JsonStrUnescape ( buf, R"(\uD801\uDC01abc)" );
ASSERT_EQ ( iRes, 7 );
// regression: check that trailing \\ is not causes reading over the end of the buff
// (run under valgrind)
char* edge = new char[10];
strncpy(edge,R"(\u000Aabc\)",10);
iRes = JsonUnescape ( buf, edge, 10);
SafeDeleteArray ( edge );
buf[iRes] = '\0';
ASSERT_STREQ ( buf, "\nabc" );
// invalid surrogate pair (low part out of range) is left unescaped
JsonStrUnescape ( buf, R"(\uD801\uDBFFabc)" );
ASSERT_STREQ ( buf, "uD801uDBFFabc" );
}
using namespace bson;
// Fixture for bson tests: owns a packed-bson buffer and provides helpers to
// parse JSON into it and wrap the result for inspection.
class TJson : public ::testing::Test
{
protected:
virtual void SetUp ()
{
dData.Reset();
sError = "";
}
CSphVector<BYTE> dData; // packed bson storage, reused across helpers
CSphString sError; // last parse error, reset before each test
// Parse sJson into dData; returns false on parse error (sets sError).
bool testcase ( const char * sJson, bool bAutoconv = false, bool bToLowercase = false, bool bCheckSize = true )
{
CSphString sText = sJson;
return sphJsonParse ( dData, ( char * ) sText.cstr (), bAutoconv, bToLowercase, bCheckSize, sError );
}
// Round-trip dNode through bson-to-bson, render back to JSON and compare
// with sProof (sProof==nullptr expects an unset result string).
void TestConv ( const Bson_c& dNode, const char * sProof )
{
CSphVector<BYTE> dRoot;
CSphString sResult;
dNode.BsonToBson ( dRoot );
Bson_c ( dRoot ).BsonToJson ( sResult );
ASSERT_STREQ ( sResult.cstr (), sProof );
}
// helper: parse given str into internal bson
// (keys are lowercased here: bToLowercase arg is true)
NodeHandle_t Bson ( const char * sJson )
{
CSphString sText { sJson, CSphString::always_create };
CSphString sParseError;
dData.Reset ();
sphJsonParse ( dData, ( char * ) sText.cstr(), false, true, true, sParseError );
if ( dData.IsEmpty () )
return nullnode;
NodeHandle_t sResult;
const BYTE * pData = dData.begin ();
sResult.second = sphJsonFindFirst ( &pData );
sResult.first = pData;
return sResult;
}
// helper : parse given str into internal bson and split it to variables
// (one Bson_c per top-level child, in document order)
CSphVector<Bson_c> Bsons ( const char * sJson)
{
Bson_c dRoot ( Bson ( sJson ));
CSphVector<Bson_c> dResult;
dRoot.ForEach([&](const NodeHandle_t& dNode){
dResult.Add (dNode);
});
return dResult;
}
};
// Parser acceptance: a spread of valid inputs (unquoted keys, nesting, mixed
// arrays, string-to-number autoconversion incl. int64 boundary values).
TEST_F( TJson, parser )
{
ASSERT_TRUE ( testcase ( R"({sv:["one","two","three"],sp:["foo","fee"],gid:315})" ) );
// 0 5 11 17 26 30 36 43 47
ASSERT_TRUE ( testcase ( "[]", true, false ) );
ASSERT_TRUE ( testcase ( R"({"name":"Alice","uid":123})" ) );
ASSERT_TRUE ( testcase ( R"({key1:{key2:{key3:"value"}}})" ) );
ASSERT_TRUE ( testcase ( R"([6,[6,[6,[6,6.0]]]])" ) );
ASSERT_TRUE ( testcase ( R"({"name":"Bob","uid":234,"gid":12})" ) );
ASSERT_TRUE ( testcase ( R"({"name":"Charlie","uid":345})" ) );
ASSERT_TRUE ( testcase ( R"({"12":345, "34":"567"})", true ) );
// bAutoconv=true: numeric strings (incl. around INT64_MIN/MAX and values
// that don't fit) must still parse without error
ASSERT_TRUE ( testcase ( R"({
i1:"123",
i2:"-123",
i3:"18446744073709551615",
i4:"-18446744073709551615",
i5:"9223372036854775807",
i6:"9223372036854775808",
i7:"9223372036854775809",
i8:"-9223372036854775807",
i9:"-9223372036854775808",
i10:"-9223372036854775809",
i11:"123abc",
i12:"-123abc",
f1:"3.15",
f2:"16777217.123"})", true ) );
ASSERT_TRUE ( testcase ( R"({
i11:"123abc",
i12:"-123abc",
f1:"3.15",
f2:"16777217.123"})", true ) );
ASSERT_TRUE ( testcase ( R"({"a":{"b":0,"c":0},"d":[]})") );
ASSERT_TRUE ( testcase ( R"({"a":{"b":0,"c":0},"d":[2,3333333333333333,45,-235]})" ) );
}
// Smoke test for ChildByPath/ChildByName on a BsonContainer_c; only checks
// that the calls compile and run (results are intentionally unused).
TEST_F ( TJson, accessor )
{
BsonContainer_c dBson (
R"({ "query": { "percolate": { "document" : { "title" : "A new tree test in the office office" } } } })");
auto dDocs = dBson.ChildByPath ("query.percolate.document");
// auto pMember = dBson->ChildByPath ( "query.percolate.document" );
// auto pMembers = dBson->ChildByPath ( "query.percolate.documents" );
// auto dQuery = (*dBson)["percolate"];
// auto dDoc = (*dQuery)["documeht"];
auto VARIABLE_IS_NOT_USED dTitle = Bson_c ( dDocs ).ChildByPath ( "title" );
auto VARIABLE_IS_NOT_USED dTitle1 = Bson_c ( dDocs ).ChildByName ( "title" );
ASSERT_TRUE (true);
}
// test bson::Bool
// bson::Bool(): numbers are truthy iff non-zero, true/false map directly,
// and strings/objects/arrays always coerce to false.
TEST_F ( TJson, bson_Bool )
{
	auto dNodes = Bsons ("[12345678, 0, 123456789000000, 1.0, 0.0, true, false, \"abc\", {}, []]");
	const bool dExpected[] = { true, false, true, true, false, true, false, false, false, false };
	for ( int i=0; i<10; ++i )
		ASSERT_EQ ( dNodes[i].Bool (), dExpected[i] );
}
// test bson::Int
// bson::Int(): ints pass through (incl. int64), doubles/bools convert,
// numeric strings are parsed (truncated), non-numeric/containers give 0.
TEST_F ( TJson, bson_Int )
{
	auto dNodes = Bsons ( R"([12345678, 123456789000000, 1.0, true, false, "123","1.13","123abc", {}, []])" );
	const int64_t dExpected[] = { 12345678, 123456789000000, 1, 1, 0, 123, 1, 0, 0, 0 };
	for ( int i=0; i<10; ++i )
		ASSERT_EQ ( dNodes[i].Int (), dExpected[i] );
}
// test bson::Double
// test bson::Double: ints/bools/numeric strings convert to double,
// non-numeric strings and containers give 0.0
TEST_F ( TJson, bson_Double )
{
auto tst = Bsons ( R"([12345678, 123456789000000, 1.23, true, false, "123","1.13","123abc", {}, []])" );
ASSERT_EQ ( tst[0].Double (), 12345678.0 );
ASSERT_EQ ( tst[1].Double (), 123456789000000.0 );
ASSERT_EQ ( tst[2].Double (), 1.23 );
ASSERT_EQ ( tst[3].Double (), 1.0 );
ASSERT_EQ ( tst[4].Double (), 0.0 );
ASSERT_EQ ( tst[5].Double (), 123.0 );
ASSERT_EQ ( tst[6].Double (), 1.13 );
ASSERT_EQ ( tst[7].Double (), 0.0 );
ASSERT_EQ ( tst[8].Double (), 0.0 );
ASSERT_EQ ( tst[9].Double (), 0.0 );
}
// Scientific notation parses correctly: e/E, +/- exponent, negative mantissa.
TEST_F ( TJson, bson_ScientificDouble )
{
auto tst = Bsons ( R"([1e-5, 1e5, -1e-5, -1e5, 6.022e+3, 1.4738223E-1])" );
ASSERT_DOUBLE_EQ ( tst[0].Double (), 0.00001 );
ASSERT_DOUBLE_EQ ( tst[1].Double (), 100000.0 );
ASSERT_DOUBLE_EQ ( tst[2].Double (), -0.00001 );
ASSERT_DOUBLE_EQ ( tst[3].Double (), -100000.0 );
ASSERT_DOUBLE_EQ ( tst[4].Double (), 6022.0 );
ASSERT_DOUBLE_EQ ( tst[5].Double (), 0.14738223 );
}
// test bson::String
// test bson::String: only string nodes yield text; numbers, bools and
// containers yield an empty string (no stringification)
TEST_F ( TJson, bson_String )
{
auto tst = Bsons ( R"([12345678, 123456789000000, 1.23, true, false, "123","1.13","123abc", {}, []])" );
ASSERT_STREQ ( tst[0].String ().cstr (), "" );
ASSERT_STREQ ( tst[1].String ().cstr (), "" );
ASSERT_STREQ ( tst[2].String ().cstr (), "" );
ASSERT_STREQ ( tst[3].String ().cstr (), "" );
ASSERT_STREQ ( tst[4].String ().cstr (), "" );
ASSERT_STREQ ( tst[5].String ().cstr (), "123" );
ASSERT_STREQ ( tst[6].String ().cstr (), "1.13" );
ASSERT_STREQ ( tst[7].String ().cstr (), "123abc" );
ASSERT_STREQ ( tst[8].String ().cstr (), "" );
ASSERT_STREQ ( tst[9].String ().cstr (), "" );
}
// A homogeneous string array is stored as JSON_STRING_VECTOR and survives a
// bson-to-json round-trip, including non-ASCII (UTF-8) content.
TEST_F ( TJson, bson_Stringvec )
{
auto szJson = R"({sv:["one","two","three" , "четыре"],gid:315})";
Bson_c tst = Bson ( szJson );
ESphJsonType dTypes[] = {JSON_STRING_VECTOR, JSON_INT32};
int iIdx = 0;
tst.ForEach ( [&] ( const NodeHandle_t & dNode ) {
ASSERT_EQ ( dNode.second, dTypes[iIdx++] );
} );
CSphString sBack;
ASSERT_TRUE (tst.BsonToJson ( sBack ));
ASSERT_STREQ ( sBack.cstr(), R"({"sv":["one","two","three","четыре"],"gid":315})");
}
// "foreach" over the vec
// "foreach" over the vec: both ForEach overloads (NodeHandle_t and Bson_c)
// visit elements in order, and each element reports the expected type
TEST_F ( TJson, bson_foreach_vec )
{
Bson_c tst = Bson ( R"([12345678, 123456789000000, 1.23, true, false, "123","1.13","123abc", {}, []])" );
ESphJsonType dTypes[] = {JSON_INT32,JSON_INT64,JSON_DOUBLE,JSON_TRUE,JSON_FALSE,JSON_STRING,
JSON_STRING,JSON_STRING,JSON_OBJECT,JSON_MIXED_VECTOR};
int iIdx = 0;
tst.ForEach ([&](const NodeHandle_t& dNode){
ASSERT_EQ ( dNode.second, dTypes[iIdx++] );
});
iIdx = 0;
tst.ForEach ( [&] ( Bson_c dNode ) {
ASSERT_EQ ( dNode.GetType (), dTypes[iIdx++] );
} );
}
// named "foreach" over the vec
// named "foreach" over the vec: array elements have no names, so the named
// overloads must always pass an empty name
TEST_F ( TJson, bson_foreach_namedvec )
{
Bson_c tst = Bson ( R"([12345678, 123456789000000, 1.23, true, false, "123","1.13","123abc", {}, []])" );
ESphJsonType dTypes[] = { JSON_INT32, JSON_INT64, JSON_DOUBLE, JSON_TRUE, JSON_FALSE, JSON_STRING, JSON_STRING
, JSON_STRING, JSON_OBJECT, JSON_MIXED_VECTOR };
int iIdx = 0;
tst.ForEach ( [&] ( CSphString&& sName, const NodeHandle_t &dNode ) {
ASSERT_STREQ (sName.cstr(),"");
ASSERT_EQ ( dNode.second, dTypes[iIdx++] );
} );
iIdx = 0;
tst.ForEach ( [&] ( CSphString&& sName, Bson_c dNode ) {
ASSERT_STREQ ( sName.cstr (), "" );
ASSERT_EQ ( dNode.GetType (), dTypes[iIdx++] );
} );
}
// "foreach" over obj
// "foreach" over obj: iterating an object visits member values in
// declaration order, for both ForEach overloads.
TEST_F ( TJson, bson_foreach_obj )
{
	// FIX: the literal used to end with "j:[]]" (bracket instead of brace),
	// matching neither the object opener nor the sibling bson_forsome_obj
	// test; if that typo made parsing fail, the ForEach lambdas never ran
	// and the test passed vacuously.
	Bson_c tst = Bson ( R"({a:12345678, b:123456789000000, c:1.23, d:true, e:false, f:"123",g:"1.13",H:"123abc", i:{}, j:[]})");
	ESphJsonType dTypes[] = { JSON_INT32, JSON_INT64, JSON_DOUBLE, JSON_TRUE, JSON_FALSE, JSON_STRING, JSON_STRING
		, JSON_STRING, JSON_OBJECT, JSON_MIXED_VECTOR };
	int iIdx = 0;
	// untyped-handle overload
	tst.ForEach ( [&] ( const NodeHandle_t &dNode ) {
		ASSERT_EQ ( dNode.second, dTypes[iIdx++] );
	} );
	iIdx = 0;
	// Bson_c overload
	tst.ForEach ( [&] ( Bson_c dNode ) {
		ASSERT_EQ ( dNode.GetType (), dTypes[iIdx++] );
	} );
}
// named "foreach" over obj
// named "foreach" over obj: member names are reported (lowercased by the
// fixture's Bson() helper, hence 'H' -> "h") along with each value's type.
TEST_F ( TJson, bson_foreach_namedobj )
{
	// FIX: the literal used to end with "j:[]]" (bracket instead of brace);
	// see bson_foreach_obj - the forsome variants below already use "j:[]}".
	Bson_c tst = Bson (
		R"({a:12345678, b:123456789000000, c:1.23, d:true, e:false, f:"123",g:"1.13",H:"123abc", i:{}, j:[]})" );
	ESphJsonType dTypes[] = { JSON_INT32, JSON_INT64, JSON_DOUBLE, JSON_TRUE, JSON_FALSE, JSON_STRING, JSON_STRING
		, JSON_STRING, JSON_OBJECT, JSON_MIXED_VECTOR };
	const char* sNames[] = {"a","b","c","d","e","f","g",
		"h", // note that name is lowercase in opposite 'H' in the object
		"i","j"};
	int iIdx = 0;
	tst.ForEach ( [&] ( CSphString&& sName, const NodeHandle_t &dNode ) {
		ASSERT_STREQ ( sName.cstr (), sNames[iIdx] );
		ASSERT_EQ ( dNode.second, dTypes[iIdx++] );
	} );
	iIdx = 0;
	tst.ForEach ( [&] ( CSphString&& sName, Bson_c dNode ) {
		ASSERT_STREQ ( sName.cstr (), sNames[iIdx] );
		ASSERT_EQ ( dNode.GetType (), dTypes[iIdx++] );
	} );
}
// Free-function wrapper around ASSERT_EQ for ESphJsonType values (usable
// outside TEST bodies where the macro needs a void-returning function).
void assert_eq( ESphJsonType a, ESphJsonType b )
{
ASSERT_EQ (a, b);
}
// "forsome" over the vec
// "forsome" over the vec: iteration stops as soon as the callback returns
// false (here after the 4th element), for both ForSome overloads
TEST_F ( TJson, bson_forsome_vec )
{
Bson_c tst = Bson ( R"([12345678, 123456789000000, 1.23, true, false, "123","1.13","123abc", {}, []])" );
ESphJsonType dTypes[] = { JSON_INT32, JSON_INT64, JSON_DOUBLE, JSON_TRUE, JSON_FALSE, JSON_STRING, JSON_STRING
, JSON_STRING, JSON_OBJECT, JSON_MIXED_VECTOR };
int iIdx = 0;
tst.ForSome ( [&] ( const NodeHandle_t &tNode ) {
// this strange lambda here is need because ASSERT_EQ macro confuses outside lambda's deduction
[&] () { ASSERT_EQ ( tNode.second, dTypes[iIdx++] ); } ();
return iIdx<4;
} );
ASSERT_EQ (iIdx,4);
iIdx = 0;
tst.ForSome ( [&] ( Bson_c dNode ) {
[&] () { ASSERT_EQ ( dNode.GetType (), dTypes[iIdx++] ); } ();
return iIdx<4;
} );
ASSERT_EQ ( iIdx, 4 );
}
// named "forsome" over the vec
// named "forsome" over the vec: early stop after 4 elements; array elements
// carry empty names
TEST_F ( TJson, bson_forsome_namedvec )
{
Bson_c tst = Bson ( R"([12345678, 123456789000000, 1.23, true, false, "123","1.13","123abc", {}, []])" );
ESphJsonType dTypes[] = { JSON_INT32, JSON_INT64, JSON_DOUBLE, JSON_TRUE, JSON_FALSE, JSON_STRING, JSON_STRING
, JSON_STRING, JSON_OBJECT, JSON_MIXED_VECTOR };
int iIdx = 0;
tst.ForSome ( [&] ( CSphString&& sName, const NodeHandle_t &dNode ) {
// inner lambda: ASSERT_* needs a void return context
[&] () {
ASSERT_STREQ ( sName.cstr (), "" );
ASSERT_EQ ( dNode.second, dTypes[iIdx++] );
} ();
return iIdx<4;
} );
ASSERT_EQ ( iIdx, 4 );
iIdx = 0;
tst.ForSome ( [&] ( CSphString&& sName, Bson_c dNode ) {
[&] () {
ASSERT_STREQ ( sName.cstr (), "" );
ASSERT_EQ ( dNode.GetType (), dTypes[iIdx++] );
} ();
return iIdx<4;
} );
ASSERT_EQ ( iIdx, 4 );
}
// "forsome" over obj
// "forsome" over obj: early stop after 4 members, unnamed overloads
TEST_F ( TJson, bson_forsome_obj )
{
Bson_c tst = Bson (
R"({a:12345678, b:123456789000000, c:1.23, d:true, e:false, f:"123",g:"1.13",H:"123abc", i:{}, j:[]})" );
ESphJsonType dTypes[] = { JSON_INT32, JSON_INT64, JSON_DOUBLE, JSON_TRUE, JSON_FALSE, JSON_STRING, JSON_STRING
, JSON_STRING, JSON_OBJECT, JSON_MIXED_VECTOR };
int iIdx = 0;
tst.ForSome ( [&] ( const NodeHandle_t &dNode ) {
// inner lambda: ASSERT_* needs a void return context
[&] () { ASSERT_EQ ( dNode.second, dTypes[iIdx++] ); } ();
return iIdx<4;
} );
ASSERT_EQ ( iIdx, 4 );
iIdx = 0;
tst.ForSome ( [&] ( Bson_c dNode ) {
[&] () { ASSERT_EQ ( dNode.GetType (), dTypes[iIdx++] ); } ();
return iIdx<4;
} );
ASSERT_EQ ( iIdx, 4 );
}
// named "forsome" over obj
// named "forsome" over obj: early stop after 4 members; names are reported
// (lowercased by the fixture's Bson() helper)
TEST_F ( TJson, bson_forsome_namedobj )
{
Bson_c tst = Bson (
R"({a:12345678, b:123456789000000, c:1.23, d:true, e:false, f:"123",g:"1.13",H:"123abc", i:{}, j:[]})" );
ESphJsonType dTypes[] = { JSON_INT32, JSON_INT64, JSON_DOUBLE, JSON_TRUE, JSON_FALSE, JSON_STRING, JSON_STRING
, JSON_STRING, JSON_OBJECT, JSON_MIXED_VECTOR };
const char * sNames[] = { "a", "b", "c", "d", "e", "f", "g", "h"
, // note that name is lowercase in opposite 'H' in the object
"i", "j" };
int iIdx = 0;
tst.ForSome ( [&] ( CSphString&& sName, const NodeHandle_t &dNode ) {
// inner lambda: ASSERT_* needs a void return context
[&] () {
ASSERT_STREQ ( sName.cstr (), sNames[iIdx] );
ASSERT_EQ ( dNode.second, dTypes[iIdx++] );
} ();
return iIdx<4;
} );
ASSERT_EQ (iIdx, 4);
iIdx = 0;
tst.ForSome ( [&] ( CSphString&& sName, Bson_c dNode ) {
[&] () {
ASSERT_STREQ ( sName.cstr (), sNames[iIdx] );
ASSERT_EQ ( dNode.GetType (), dTypes[iIdx++] );
} ();
return iIdx<4;
} );
ASSERT_EQ ( iIdx, 4 );
}
// RawBlob(): homogeneous int32/int64/double vectors and strings expose their
// payload as a contiguous (pointer, count) view; mixed vectors do not.
TEST_F ( TJson, bson_rawblob )
{
	// blob of ints
	Bson_c tst = Bson ( "[0,1,2,3,4]" );
	auto dBlob = bson::RawBlob ( tst );
	ASSERT_EQ ( dBlob.second, 5 );
	auto pValues = (int*)dBlob.first;
	for (int i=0; i<5; ++i) // FIX: was i<4, which never checked the last element
		ASSERT_EQ ( pValues[i], i);
	// blob of mixed (must not work)
	tst = Bson ( "[0,1,2,300000000000000,4]" );
	dBlob = bson::RawBlob ( tst );
	ASSERT_EQ ( dBlob.second, 0 ); // since values are different, Bson is mixed vector, which can't be blob
	// blob of int64
	tst = Bson ( "[100000000000,100000000001,100000000002,100000000003,100000000004]" );
	dBlob = bson::RawBlob ( tst );
	ASSERT_EQ ( dBlob.second, 5 );
	auto pValues64 = ( int64_t * ) dBlob.first;
	ASSERT_EQ ( pValues64[0], 100000000000);
	ASSERT_EQ ( pValues64[1], 100000000001 );
	ASSERT_EQ ( pValues64[2], 100000000002 );
	ASSERT_EQ ( pValues64[3], 100000000003 );
	ASSERT_EQ ( pValues64[4], 100000000004 );
	// blob of doubles
	tst = Bson ( "[0.0,0.1,0.2,0.3,0.4]" );
	dBlob = bson::RawBlob ( tst );
	ASSERT_EQ ( dBlob.second, 5 );
	auto pValuesD = ( double * ) dBlob.first;
	ASSERT_EQ ( pValuesD[0], 0.0 );
	ASSERT_EQ ( pValuesD[1], 0.1 );
	ASSERT_EQ ( pValuesD[2], 0.2 );
	ASSERT_EQ ( pValuesD[3], 0.3 );
	ASSERT_EQ ( pValuesD[4], 0.4 );
	// string is also may be traited as blob (bytes, no terminator)
	tst = Bson_c(Bson( "[\"Hello world!\"]" )).ChildByIndex (0);
	dBlob = bson::RawBlob ( tst );
	ASSERT_EQ ( dBlob.second, strlen("Hello world!") );
	ASSERT_EQ ( 0, memcmp (dBlob.first, "Hello world!", strlen("Hello world!")));
}
// test property "IsEmpty"
// test property "IsEmpty": unparsable/empty input and empty containers are
// empty; anything with at least one member/element is not
TEST_F ( TJson, bson_IsEmpty )
{
ASSERT_TRUE ( Bson_c ( Bson ( "" ) ).IsEmpty () );
ASSERT_TRUE ( Bson_c ( Bson ( "[]" ) ).IsEmpty () );
ASSERT_TRUE ( Bson_c ( Bson ( "{}" ) ).IsEmpty () );
ASSERT_FALSE ( Bson_c ( Bson ( "{a:2}" ) ).IsEmpty () );
ASSERT_FALSE ( Bson_c ( Bson ( R"(["a","b"])" ) ).IsEmpty () );
ASSERT_FALSE ( Bson_c ( Bson ( R"(["a","b",3])" ) ).IsEmpty () );
ASSERT_FALSE ( Bson_c ( Bson ( R"([1])" ) ).IsEmpty () );
}
// test counting of values
// test counting of values: scalars count as 1, containers count their
// elements/members, empty containers and bools count as 0
TEST_F ( TJson, bson_CountValues )
{
ASSERT_EQ ( Bson_c ( Bson ( "" ) ).CountValues (), 0 );
ASSERT_EQ ( Bson_c ( Bson ( "{}" ) ).CountValues (), 0 );
auto tst = Bsons ( R"([1,1.0,["a","b"],[1,"a"],[1,2],[1.0,2.0],{a:1,b:2,c:3}, {}, [], true, false])" );
ASSERT_EQ ( tst[0].CountValues (), 1 );
ASSERT_EQ ( tst[1].CountValues (), 1 );
ASSERT_EQ ( tst[2].CountValues (), 2 );
ASSERT_EQ ( tst[3].CountValues (), 2 );
ASSERT_EQ ( tst[4].CountValues (), 2 );
ASSERT_EQ ( tst[5].CountValues (), 2 );
ASSERT_EQ ( tst[6].CountValues (), 3 );
ASSERT_EQ ( tst[7].CountValues (), 0 );
ASSERT_EQ ( tst[8].CountValues (), 0 );
ASSERT_EQ ( tst[9].CountValues (), 0 );
ASSERT_EQ ( tst[10].CountValues (), 0 );
}
// test standalone size
// test standalone size: containers report the bytes needed to store them as
// independent bson documents; plain scalars/bools/null report -1
TEST_F ( TJson, bson_standalonesize )
{
ASSERT_EQ ( Bson_c ( Bson ( "" ) ).StandaloneSize (), 5 );
ASSERT_EQ ( Bson_c ( Bson ( "{}" ) ).StandaloneSize (), 5 );
const char* sJson = R"([1,1.0,["a","b"],[1,"a"],[1,2],[1.0,2.0],{a:1,b:2,c:3}, {}, [], true, false, null])";
auto tst = Bsons ( sJson );
ASSERT_EQ ( tst[0].StandaloneSize (), -1 );
ASSERT_EQ ( tst[1].StandaloneSize (), -1 );
ASSERT_EQ ( tst[2].StandaloneSize (), 11 );
ASSERT_EQ ( tst[3].StandaloneSize (), 15 );
ASSERT_EQ ( tst[4].StandaloneSize (), 14 );
ASSERT_EQ ( tst[5].StandaloneSize (), 22 );
ASSERT_EQ ( tst[6].StandaloneSize (), 26 );
ASSERT_EQ ( tst[7].StandaloneSize (), 5 );
ASSERT_EQ ( tst[8].StandaloneSize (), 7 );
ASSERT_EQ ( tst[9].StandaloneSize (), -1 );
ASSERT_EQ ( tst[10].StandaloneSize (), -1 );
ASSERT_EQ ( tst[11].StandaloneSize (), -1 );
ASSERT_EQ ( Bson_c ( Bson ( sJson ) ).StandaloneSize (), 108 );
}
// test str comparision
// StrEq(): exact, case-sensitive, full-string comparison only -
// neither case-folded nor prefix matches succeed.
TEST_F ( TJson, bson_StrEq )
{
	auto dNodes = Bsons ( R"(["hello","World!"])" );
	ASSERT_TRUE ( dNodes[0].StrEq ( "hello" ) );
	ASSERT_FALSE ( dNodes[0].StrEq ( "Hello" ) ); // case matters
	ASSERT_TRUE ( dNodes[1].StrEq ( "World!" ) );
	ASSERT_FALSE ( dNodes[1].StrEq ( "world!" ) ); // case matters
	ASSERT_FALSE ( dNodes[1].StrEq ( "world" ) ); // prefix is not equality
}
// test access direct children of assocs by name
// test access direct children of assocs by name: lookup uses lowercased keys
// (the fixture's Bson() helper lowercases on parse), quoted or not
TEST_F ( TJson, bson_child_by_name )
{
Bson_c tst = Bson ( R"({first :1, Second: 2,"third" :3,"Fourth":4})" );
ASSERT_EQ ( Bson_c ( tst.ChildByName ( "first" ) ).Int (), 1 );
ASSERT_EQ ( Bson_c ( tst.ChildByName ( "second" ) ).Int (), 2 );
ASSERT_EQ ( Bson_c ( tst.ChildByName ( "third" ) ).Int (), 3 );
ASSERT_EQ ( Bson_c ( tst.ChildByName ( "fourth" ) ).Int (), 4 );
// no access by index to object members!
ASSERT_TRUE ( Bson_c ( tst.ChildByIndex ( 0 ) ).IsNull () );
}
// test access to children of array by idx
// test access to children of array by idx (0-based, mixed element types)
TEST_F ( TJson, bson_child_by_index )
{
Bson_c tst = Bson ( R"([1,"abc",2.2])" );
ASSERT_EQ ( Bson_c ( tst.ChildByIndex ( 0 ) ).Int (), 1 );
ASSERT_TRUE ( Bson_c ( tst.ChildByIndex ( 1 ) ).StrEq("abc") );
ASSERT_EQ ( Bson_c ( tst.ChildByIndex ( 2 ) ).Double (), 2.2 );
}
// test access to children of array/obj by complex path
// test access to children of array/obj by complex path: dotted member names
// combined with [i] array subscripts, arbitrarily nested
TEST_F ( TJson, bson_child_by_path )
{
Bson_c tst = Bson ( R"({name:"hello",value:[1,2,{syntax:[1,3,42,13],value:"Here"},"blabla"]})" );
ASSERT_TRUE ( Bson_c ( tst.ChildByPath ( "name" ) ).StrEq ("hello") );
ASSERT_EQ ( Bson_c ( tst.ChildByPath ( "value[1]" ) ).Int(), 2 );
ASSERT_EQ ( Bson_c ( tst.ChildByPath ( "value[2].syntax[2]" ) ).Int (), 42 );
ASSERT_TRUE ( Bson_c ( tst.ChildByPath ( "value[2].value" ) ).StrEq ( "Here" ) );
ASSERT_TRUE ( Bson_c ( tst.ChildByPath ( "value[3]" ) ).StrEq ( "blabla" ) );
}
// test HasAnyOf helper
// test HasAnyOf helper: true iff at least one of the N listed names is a
// DIRECT child key (nested keys like value4 are not found)
TEST_F ( TJson, bson_has_any_of )
{
Bson_c tst = Bson ( R"({name:"hello",value1:2,value2:"sdfa",value3:{value4:"foo"}})" );
ASSERT_TRUE ( tst.HasAnyOf ( 2, "foo", "value3" ) );
ASSERT_TRUE ( tst.HasAnyOf ( 2, "name", "value1" ) );
ASSERT_FALSE ( tst.HasAnyOf ( 2, "foo", "bar" ) );
ASSERT_FALSE ( tst.HasAnyOf ( 2, "foo", "value4" ) );
}
// test bson to json render
// test bson to json render: strings are quoted, doubles render with six
// decimals, empty/unparsable input renders as "{}"
TEST_F ( TJson, bson_BsonToJson )
{
auto tst = Bsons ( R"(["hello",2,3.1415926,{value4:"foo"}])" );
CSphString sJson;
tst[0].BsonToJson (sJson);
ASSERT_STREQ ( sJson.cstr(),"\"hello\"" );
tst[1].BsonToJson ( sJson );
ASSERT_STREQ ( sJson.cstr (), "2" );
tst[2].BsonToJson ( sJson );
ASSERT_STREQ ( sJson.cstr (), "3.141593" );
tst[3].BsonToJson ( sJson );
ASSERT_STREQ ( sJson.cstr (), R"({"value4":"foo"})" );
Bson_c ( Bson ( "" ) ).BsonToJson ( sJson );
ASSERT_STREQ ( sJson.cstr (), "{}" );
Bson_c ( Bson ( "{}" ) ).BsonToJson ( sJson );
ASSERT_STREQ ( sJson.cstr (), "{}" );
}
// test standalone size
// bson-to-bson round-trip (via TestConv): containers survive re-encoding;
// a nullptr proof means the node can't be re-encoded standalone, so the
// rendered string stays unset (plain scalars, bools, null)
TEST_F ( TJson, bson_BsonToBson )
{
TestConv ( Bson ( "" ), "{}" );
TestConv ( Bson ( "{}" ), "{}" );
TestConv ( Bson ( "[]" ), "[]" );
const char * sJson = R"([1,1.0,["a","b"],[1,"a"],[1,2],[1.0,2.0],{a:1,b:2,c:3}, {}, [], true, false, null])";
auto tst = Bsons ( sJson );
TestConv ( tst[0], nullptr );
TestConv ( tst[1], nullptr );
TestConv ( tst[2], R"(["a","b"])" );
TestConv ( tst[3], R"([1,"a"])" );
TestConv ( tst[4], "[1,2]" );
TestConv ( tst[5], "[1.000000,2.000000]" );
TestConv ( tst[6], R"({"a":1,"b":2,"c":3})" );
TestConv ( tst[7], "{}" );
TestConv ( tst[8], "[]" );
TestConv ( tst[9], nullptr );
TestConv ( tst[10], nullptr );
TestConv ( tst[11], nullptr );
TestConv ( Bson ( sJson ),
R"([1,1.000000,["a","b"],[1,"a"],[1,2],[1.000000,2.000000],{"a":1,"b":2,"c":3},{},[],true,false,null])" );
}
// test contained bson
// BsonContainer_c owns its parsed data: deep path access works and the whole
// document re-renders to (whitespace-normalized) JSON
TEST_F ( TJson, bson_BsonContainer )
{
BsonContainer_c dBson (
R"({ "query": { "percolate": { "document" : { "title" : "A new tree test in the office office" } } } })" );
auto dTitle = Bson_c ( dBson.ChildByPath ( "query.percolate.document.title" ));
ASSERT_TRUE ( dTitle.StrEq ( "A new tree test in the office office" ));
CSphString sJson;
dBson.BsonToJson ( sJson );
ASSERT_STREQ ( sJson.cstr (), R"({"query":{"percolate":{"document":{"title":"A new tree test in the office office"}}}})" );
}
// test contained bson
// cJSON -> bson bridge: a tree parsed by cJSON converts via cJsonToBson and
// then behaves like natively-parsed bson (path access, re-render)
TEST_F ( TJson, bson_via_cjson )
{
const char * sJson = R"({ "query": { "percolate": { "document" : { "title" : "A new tree test in the office office" } } } })";
auto pCjson = cJSON_Parse ( sJson );
StringBuilder_c sError;
CSphVector<BYTE> dBson;
bson::cJsonToBson (pCjson, dBson, false, false );
if ( pCjson )
cJSON_Delete ( pCjson );
NodeHandle_t dNode;
// bail out quietly if the conversion produced nothing
if ( dBson.IsEmpty () )
return;
const BYTE * pData = dBson.begin ();
dNode.second = sphJsonFindFirst ( &pData );
dNode.first = pData;
Bson_c dBSON ( dNode );
auto dTitle = Bson_c ( dBSON.ChildByPath ( "query.percolate.document.title" ) );
ASSERT_TRUE ( dTitle.StrEq ( "A new tree test in the office office" ) );
CSphString sNewJson;
dBSON.BsonToJson ( sNewJson );
ASSERT_STREQ ( sNewJson.cstr ()
, R"({"query":{"percolate":{"document":{"title":"A new tree test in the office office"}}}})" );
}
// Conversion consistency: homogeneous numeric arrays keep their array kind,
// and with autoconversion enabled (third cJsonToBson argument) an array of
// numeric strings ["1","2","3"] collapses into an int32 vector.
// (Removed an unused StringBuilder_c local that was never written or read.)
TEST_F ( TJson, bson_via_cjson_test_consistency )
{
	const char * sJson = R"({ "aR32": [1,2,3,4,20], "ar64": [100000000000,100000000001,100000000002,100000000003,100000000004], "ardbl": [1.1,1.2,1.3], "arrstr":["foo","bar"], "arrmixed":[1,1.0], "arstr":["1","2","3"] })";
	auto pCjson = cJSON_Parse ( sJson );
	CSphVector<BYTE> dBson;
	bson::cJsonToBson ( pCjson, dBson, true );
	if ( pCjson )
		cJSON_Delete ( pCjson );
	NodeHandle_t dNode;
	if ( dBson.IsEmpty () )
		return; // nothing to verify if conversion produced no data
	const BYTE * pData = dBson.begin ();
	dNode.second = sphJsonFindFirst ( &pData );
	dNode.first = pData;
	Bson_c dBSON ( dNode );
	// NOTE(review): source key is "aR32" but lookup uses "ar32" - presumably
	// the conversion lowercases key names; confirm against cJsonToBson.
	auto d32 = Bson_c ( dBSON.ChildByName ( "ar32" ));
	auto d64 = Bson_c ( dBSON.ChildByName ( "ar64" ));
	auto ddbl = Bson_c ( dBSON.ChildByName ( "ardbl" ));
	auto dmixed = Bson_c ( dBSON.ChildByName ( "arrmixed" ));
	auto dstr = Bson_c ( dBSON.ChildByName ( "arstr" ) );
	ASSERT_TRUE ( d32.IsArray () );
	ASSERT_TRUE ( d64.IsArray () );
	ASSERT_TRUE ( ddbl.IsArray () );
	ASSERT_TRUE ( dmixed.IsArray () );
	ASSERT_TRUE ( dstr.GetType ()==JSON_INT32_VECTOR );
}
TEST ( Bson_iterate, root )
{
BsonContainer_c dBson ( R"({ "one":"hello", "two":"world"})" );
BsonIterator_c dIter (dBson);
ASSERT_TRUE ( dIter );
ASSERT_EQ ( dIter.NumElems (), -1 ); // since root is not array
ASSERT_STREQ (dIter.GetName().cstr(), "one");
ASSERT_TRUE ( dIter.IsString() );
ASSERT_TRUE ( dIter.StrEq ("hello"));
ASSERT_TRUE ( dIter.Next() );
ASSERT_STREQ ( dIter.GetName ().cstr (), "two" );
ASSERT_TRUE ( dIter.IsString () );
ASSERT_TRUE ( dIter.StrEq ( "world" ) );
ASSERT_FALSE ( dIter.Next() );
}
TEST ( Bson_iterate, empty_root )
{
BsonContainer_c dBson ( R"({})" );
BsonIterator_c dIter ( dBson );
ASSERT_FALSE ( dIter );
ASSERT_EQ ( dIter.NumElems (), -1 ); // since root is not array
ASSERT_EQ ( dIter.GetName ().cstr (), nullptr );
ASSERT_TRUE ( dIter.IsNull ());
ASSERT_FALSE ( dIter.Next() );
}
TEST ( Bson_iterate, object )
{
BsonContainer_c dRoot ( R"({"x":{"one":"hello", "two":"world"}})" );
Bson_c dBson = dRoot.ChildByName ("x");
BsonIterator_c dIter ( dBson );
ASSERT_TRUE ( dIter );
ASSERT_EQ ( dIter.NumElems (), -1 ); // since obj is not array
ASSERT_STREQ ( dIter.GetName ().cstr (), "one" );
ASSERT_TRUE ( dIter.IsString () );
ASSERT_TRUE ( dIter.StrEq ( "hello" ) );
ASSERT_TRUE ( dIter.Next () );
ASSERT_STREQ ( dIter.GetName ().cstr (), "two" );
ASSERT_TRUE ( dIter.IsString () );
ASSERT_TRUE ( dIter.StrEq ( "world" ) );
ASSERT_FALSE ( dIter.Next () );
}
TEST ( Bson_iterate, empty_object )
{
BsonContainer_c dRoot ( R"({"x":{}})" );
Bson_c dBson = dRoot.ChildByName ( "x" );
BsonIterator_c dIter ( dBson );
ASSERT_FALSE ( dIter );
ASSERT_EQ ( dIter.NumElems (), -1 ); // since obj is not array
ASSERT_TRUE ( dIter.GetName ().IsEmpty () );
ASSERT_TRUE ( dIter.IsNull () );
ASSERT_FALSE ( dIter.Next () );
}
TEST ( Bson_iterate, array_int )
{
BsonContainer_c dBson ( R"([1,2])" );
BsonIterator_c dIter ( dBson );
ASSERT_TRUE ( dIter );
ASSERT_EQ ( dIter.NumElems (), 2 );
ASSERT_TRUE ( dIter.GetName ().IsEmpty () );
ASSERT_TRUE ( dIter.IsInt () );
ASSERT_EQ ( dIter.Int(), 1 );
ASSERT_TRUE ( dIter.Next () );
ASSERT_TRUE ( dIter.GetName ().IsEmpty () );
ASSERT_TRUE ( dIter.IsInt () );
ASSERT_EQ ( dIter.Int (), 2 );
ASSERT_FALSE ( dIter.Next () );
ASSERT_EQ ( dIter.NumElems (), 0 );
}
TEST ( Bson_iterate, array_int64 )
{
BsonContainer_c dBson ( R"([100000000001,100000000002])" );
BsonIterator_c dIter ( dBson );
ASSERT_TRUE ( dIter );
ASSERT_EQ ( dIter.NumElems (), 2 );
ASSERT_TRUE ( dIter.GetName ().IsEmpty () );
ASSERT_TRUE ( dIter.IsInt () );
ASSERT_EQ ( dIter.Int (), 100000000001 );
ASSERT_TRUE ( dIter.Next () );
ASSERT_TRUE ( dIter.GetName ().IsEmpty () );
ASSERT_TRUE ( dIter.IsInt () );
ASSERT_EQ ( dIter.Int (), 100000000002 );
ASSERT_FALSE ( dIter.Next () );
ASSERT_EQ ( dIter.NumElems (), 0 );
}
TEST ( Bson_iterate, array_double )
{
BsonContainer_c dBson ( R"([1.1,1.2])" );
BsonIterator_c dIter ( dBson );
ASSERT_TRUE ( dIter );
ASSERT_EQ ( dIter.NumElems (), 2 );
ASSERT_TRUE ( dIter.GetName ().IsEmpty () );
ASSERT_TRUE ( dIter.IsDouble () );
ASSERT_EQ ( dIter.Double (), 1.1 );
ASSERT_TRUE ( dIter.Next () );
ASSERT_TRUE ( dIter.GetName ().IsEmpty () );
ASSERT_TRUE ( dIter.IsDouble () );
ASSERT_EQ ( dIter.Double (), 1.2 );
ASSERT_FALSE ( dIter.Next () );
ASSERT_EQ ( dIter.NumElems (), 0 );
}
TEST ( Bson_iterate, array_string )
{
BsonContainer_c dBson ( R"(["foo","bar"])" );
BsonIterator_c dIter ( dBson );
ASSERT_TRUE ( dIter );
ASSERT_EQ ( dIter.NumElems (), 2 );
ASSERT_TRUE ( dIter.GetName ().IsEmpty () );
ASSERT_TRUE ( dIter.IsString () );
ASSERT_TRUE ( dIter.StrEq ( "foo" ) );
ASSERT_TRUE ( dIter.Next () );
ASSERT_TRUE ( dIter.GetName ().IsEmpty () );
ASSERT_TRUE ( dIter.IsString () );
ASSERT_TRUE ( dIter.StrEq ( "bar" ) );
ASSERT_FALSE ( dIter.Next () );
ASSERT_EQ ( dIter.NumElems (), 0 );
}
// Iterate a heterogeneous array: int, double, string - each element must
// report the correct type and value.
TEST ( Bson_iterate, array_mixed )
{
	BsonContainer_c dBson ( R"([1,1.1,"bar"])" );
	BsonIterator_c dIter ( dBson );
	ASSERT_TRUE ( dIter );
	ASSERT_EQ ( dIter.NumElems (), 3 );
	ASSERT_TRUE ( dIter.GetName ().IsEmpty () );
	ASSERT_TRUE ( dIter.IsInt () );
	ASSERT_EQ ( dIter.Int (), 1 );
	ASSERT_TRUE ( dIter.Next () );
	ASSERT_TRUE ( dIter.GetName ().IsEmpty () );
	// was ASSERT_TRUE ( dIter.Double () ) - that only checked the value was
	// non-zero, not that the node is a double; use the type check like the
	// sibling tests do
	ASSERT_TRUE ( dIter.IsDouble () );
	ASSERT_EQ ( dIter.Double (), 1.1 );
	ASSERT_TRUE ( dIter.Next () );
	ASSERT_TRUE ( dIter.GetName ().IsEmpty () );
	ASSERT_TRUE ( dIter.IsString () );
	ASSERT_TRUE ( dIter.StrEq ( "bar" ) );
	ASSERT_FALSE ( dIter.Next () );
	ASSERT_EQ ( dIter.NumElems (), 0 );
}
TEST ( Bson_iterate, empty_array )
{
BsonContainer_c dRoot ( R"({"x":[]})" );
Bson_c dBson = dRoot.ChildByName ( "x" );
BsonIterator_c dIter ( dBson );
ASSERT_FALSE ( dIter );
ASSERT_EQ ( dIter.NumElems (), 0 ); // since obj is not array
ASSERT_TRUE ( dIter.GetName ().IsEmpty () );
ASSERT_TRUE ( dIter.IsNull () );
ASSERT_FALSE ( dIter.Next () );
}
TEST ( Bson_iterate, _null )
{
BsonContainer_c dRoot ( R"({"x":null})" );
Bson_c dBson = dRoot.ChildByName ( "x" );
BsonIterator_c dIter ( dBson );
ASSERT_TRUE ( dIter );
ASSERT_EQ ( dIter.NumElems (), 1 );
ASSERT_TRUE ( dIter.GetName ().IsEmpty () );
ASSERT_EQ ( dIter.GetType (), JSON_NULL );
ASSERT_FALSE ( dIter.Next () );
ASSERT_EQ ( dIter.NumElems (), 0 );
}
TEST ( Bson_iterate, _true )
{
BsonContainer_c dRoot ( R"({"x":true})" );
Bson_c dBson = dRoot.ChildByName ( "x" );
BsonIterator_c dIter ( dBson );
ASSERT_TRUE ( dIter );
ASSERT_EQ ( dIter.NumElems (), 1 );
ASSERT_TRUE ( dIter.GetName ().IsEmpty () );
ASSERT_EQ ( dIter.GetType (), JSON_TRUE );
ASSERT_FALSE ( dIter.Next () );
ASSERT_EQ ( dIter.NumElems (), 0 );
}
TEST ( Bson_iterate, _false )
{
BsonContainer_c dRoot ( R"({"x":false})" );
Bson_c dBson = dRoot.ChildByName ( "x" );
BsonIterator_c dIter ( dBson );
ASSERT_TRUE ( dIter );
ASSERT_EQ ( dIter.NumElems (), 1 );
ASSERT_TRUE ( dIter.GetName ().IsEmpty () );
ASSERT_EQ ( dIter.GetType (), JSON_FALSE );
ASSERT_FALSE ( dIter.Next () );
ASSERT_EQ ( dIter.NumElems (), 0 );
}
TEST ( Bson_iterate, int32 )
{
BsonContainer_c dRoot ( R"({"x":1})" );
Bson_c dBson = dRoot.ChildByName ( "x" );
BsonIterator_c dIter ( dBson );
ASSERT_TRUE ( dIter );
ASSERT_EQ ( dIter.NumElems (), 1 );
ASSERT_TRUE ( dIter.GetName ().IsEmpty () );
ASSERT_TRUE ( dIter.IsInt () );
ASSERT_EQ ( dIter.Int (), 1 );
ASSERT_FALSE ( dIter.Next () );
ASSERT_EQ ( dIter.NumElems (), 0 );
}
TEST ( Bson_iterate, int64 )
{
BsonContainer_c dRoot ( R"({"x":100000000001})" );
Bson_c dBson = dRoot.ChildByName ( "x" );
BsonIterator_c dIter ( dBson );
ASSERT_TRUE ( dIter );
ASSERT_EQ ( dIter.NumElems (), 1 );
ASSERT_TRUE ( dIter.GetName ().IsEmpty () );
ASSERT_TRUE ( dIter.IsInt () );
ASSERT_EQ ( dIter.Int (), 100000000001 );
ASSERT_FALSE ( dIter.Next () );
ASSERT_EQ ( dIter.NumElems (), 0 );
}
TEST ( Bson_iterate, _double )
{
BsonContainer_c dRoot ( R"({"x":1.1})" );
Bson_c dBson = dRoot.ChildByName ( "x" );
BsonIterator_c dIter ( dBson );
ASSERT_TRUE ( dIter );
ASSERT_EQ ( dIter.NumElems (), 1 );
ASSERT_TRUE ( dIter.GetName ().IsEmpty () );
ASSERT_TRUE ( dIter.IsDouble () );
ASSERT_EQ ( dIter.Double (), 1.1 );
ASSERT_FALSE ( dIter.Next () );
ASSERT_EQ ( dIter.NumElems (), 0 );
}
TEST ( Bson_iterate, _string )
{
BsonContainer_c dRoot ( R"({"x":"hello"})" );
Bson_c dBson = dRoot.ChildByName ( "x" );
BsonIterator_c dIter ( dBson );
ASSERT_TRUE ( dIter );
ASSERT_EQ ( dIter.NumElems (), 1 );
ASSERT_TRUE ( dIter.GetName ().IsEmpty () );
ASSERT_TRUE ( dIter.IsString () );
ASSERT_TRUE ( dIter.StrEq ( "hello" ) );
ASSERT_FALSE ( dIter.Next () );
ASSERT_EQ ( dIter.NumElems (), 0 );
}
// function placed in searchd.cpp, near line 2700. Here is direct copy-paste for testing only.
namespace {
// Strip every backquote (`) from the input C string.
// Returns an empty (nullptr-backed) CSphString for null/empty input, or when
// the result after stripping is empty - never an allocated empty buffer.
CSphString RemoveBackQuotes ( const char * pSrc )
{
CSphString sResult;
if ( !pSrc )
return sResult;
size_t iLen = strlen ( pSrc );
if ( !iLen )
return sResult;
auto szResult = new char[iLen+1];
auto * sMax = pSrc+iLen;
auto d = szResult;
while ( pSrc<sMax )
{
// copy the chunk up to the next backquote (or up to the end of input)
auto sQuote = (const char *) memchr ( pSrc, '`', sMax-pSrc );
if ( !sQuote )
sQuote = sMax;
auto iChunk = sQuote-pSrc;
memmove ( d, pSrc, iChunk );
d += iChunk;
pSrc += iChunk+1; // +1 to skip the quote
}
*d = '\0';
if ( !*szResult ) // never return allocated, but empty str. Prefer to return nullptr instead.
SafeDeleteArray( szResult );
sResult.Adopt ( &szResult ); // Adopt takes ownership; szResult may be nullptr here (empty result)
return sResult;
}
}
TEST (b, backquote)
{
ASSERT_STREQ( "", RemoveBackQuotes ( nullptr ).scstr () );
char c = '\0';
ASSERT_STREQ( "", RemoveBackQuotes ( &c ).scstr () );
ASSERT_STREQ( "", RemoveBackQuotes ( "" ).scstr () );
ASSERT_STREQ( "", RemoveBackQuotes ( "`" ).scstr () );
ASSERT_STREQ( "", RemoveBackQuotes ( "``" ).scstr () );
ASSERT_STREQ( "", RemoveBackQuotes ( "```" ).scstr () );
ASSERT_STREQ( "a", RemoveBackQuotes ( "a" ).scstr () );
ASSERT_STREQ( "a", RemoveBackQuotes ( "a`" ).scstr () );
ASSERT_STREQ( "a", RemoveBackQuotes ( "a``" ).scstr () );
ASSERT_STREQ( "a", RemoveBackQuotes ( "a```" ).scstr () );
ASSERT_STREQ( "aa", RemoveBackQuotes ( "a`a" ).scstr () );
ASSERT_STREQ( "aa", RemoveBackQuotes ( "a``a" ).scstr () );
ASSERT_STREQ( "aa", RemoveBackQuotes ( "a```a" ).scstr () );
ASSERT_STREQ( "a", RemoveBackQuotes ( "`a" ).scstr () );
ASSERT_STREQ( "a", RemoveBackQuotes ( "``a" ).scstr () );
ASSERT_STREQ( "a", RemoveBackQuotes ( "```a" ).scstr () );
ASSERT_STREQ( "a", RemoveBackQuotes ( "``a`" ).scstr () );
ASSERT_STREQ( "a", RemoveBackQuotes ( "```a``" ).scstr () );
}
// Fixture for bson builder tests: a shared byte buffer plus a helper that
// serializes it back to JSON and compares against the expected text.
class TBson : public ::testing::Test
{
protected:
	// reset the shared buffer before every test
	// (was `virtual void SetUp()` - use `override` like the other fixtures in these tests)
	void SetUp () override
	{
		dData.Reset();
	}
	CSphVector<BYTE> dData;
	// serialize the accumulated bson and compare with the expected JSON text
	void Check ( const char * sProof )
	{
		CSphString sResult;
		Bson_c ( dData ).BsonToJson ( sResult );
		ASSERT_STREQ ( sResult.cstr (), sProof );
	}
};
TEST_F ( TBson, bson_empty )
{
{
Root_c foo ( dData );
}
Check ( "{}" );
}
TEST_F ( TBson, bson_simple )
{
{
Root_c foo ( dData );
foo.AddBool ( "true", true );
foo.AddBool ( "false", false );
foo.AddNull ( "empty" );
foo.AddDouble ( "float", 0.2345 );
foo.AddInt( "int", -10 );
foo.AddInt ( "bigint", -1000000000000 );
foo.AddString ("string", "hello");
}
Check ( R"({"true":true,"false":false,"empty":null,"float":0.234500,"int":-10,"bigint":-1000000000000,"string":"hello"})" );
}
TEST_F ( TBson, bson_arr_empty )
{
{
Root_c foo ( dData );
MixedVector_c fee ( foo.StartMixedVec ( "mixed" ), 0 );
StringVector_c bar ( foo.StartStringVec ( "string" ), 0 );
Obj_c baz ( foo.StartObj ( "obj" ) );
}
Check ( R"({"mixed":[],"string":[],"obj":{}})" );
}
TEST_F ( TBson, bson_stringvec )
{
{
Root_c foo ( dData );
StringVector_c bar ( foo.StartStringVec ( "string_vec" ), 3 );
bar.AddValue("one");
bar.AddValue("two");
bar.AddValue("three");
}
Check ( R"({"string_vec":["one","two","three"]})" );
}
TEST_F ( TBson, bson_mixed_vec )
{
{
Root_c foo ( dData );
MixedVector_c bar ( foo.StartMixedVec ( "mixed_vec" ), 8 );
{
Obj_c baz ( bar.StartObj () );
}
{
StringVector_c baz ( bar.StartStringVec (), 2 );
baz.AddValue ( "one" );
baz.AddValue ( "two" );
}
{
MixedVector_c baz ( bar.StartMixedVec (), 2 );
baz.AddString( "one" );
baz.AddInt( 10 );
}
bar.AddInt ( 1000000000000 );
bar.AddDouble( 1.1234 );
bar.AddBool(true);
bar.AddBool(false);
bar.AddNull();
}
Check ( R"({"mixed_vec":[{},["one","two"],["one",10],1000000000000,1.123400,true,false,null]})" );
}
| 38,609
|
C++
|
.cpp
| 1,110
| 32.540541
| 219
| 0.618317
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
16,991
|
gtests_filter.cpp
|
manticoresoftware_manticoresearch/src/gtests/gtests_filter.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include <gtest/gtest.h>
#include "sphinxfilter.h"
#include "conversion.h"
class filter_block_level : public ::testing::Test
{
protected:
void SetDefault()
{
tOpt.m_sAttrName = "gid";
tOpt.m_bExclude = false;
tOpt.m_bHasEqualMin = true;
tOpt.m_bHasEqualMax = true;
tOpt.m_bOpenLeft = false;
tOpt.m_bOpenRight = false;
tOpt.m_eType = SPH_FILTER_RANGE;
}
void SetUp() override
{
SetDefault();
}
CSphFilterSettings tOpt;
CreateFilterContext_t tCtx;
};
TEST_F ( filter_block_level, range )
{
CSphString sWarning, sError;
CSphSchema tSchema;
CSphColumnInfo tCol;
CSphFixedVector<DWORD> dMin ( DWSIZEOF(DocID_t) + 1 ), dMax ( DWSIZEOF(DocID_t) + 1 );
std::unique_ptr<ISphFilter> tFilter;
tCol.m_eAttrType = SPH_ATTR_INTEGER;
tCol.m_sName = "gid";
tSchema.AddAttr ( tCol, false );
tOpt.m_iMinValue = 10;
tOpt.m_iMaxValue = 40;
tCtx.m_pMatchSchema = &tSchema;
tFilter = sphCreateFilter ( tOpt, tCtx, sError, sWarning );
ASSERT_TRUE ( tFilter!=nullptr );
// filter >=10 && <=40 vs block 1-5
*dMin.Begin() = 1;
*dMax.Begin() = 5;
ASSERT_FALSE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
// filter >=10 && <=40 vs block 1-10
*dMax.Begin() = 10;
ASSERT_TRUE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
// filter >=10 && <=40 vs block 40-50
*dMin.Begin() = 40;
*dMax.Begin() = 50;
ASSERT_TRUE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
// filter >=10 && <=40 vs block 41-50
*dMin.Begin() = 41;
ASSERT_FALSE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
// filter >=10 && <=40 vs block 9-41
*dMin.Begin() = 9;
*dMax.Begin() = 41;
ASSERT_TRUE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
///
SetDefault();
tOpt.m_iMaxValue = 40;
tOpt.m_bOpenLeft = true;
tFilter = sphCreateFilter ( tOpt, tCtx, sError, sWarning );
ASSERT_TRUE ( tFilter!=nullptr );
// filter <=40 vs block 41-50
*dMin.Begin() = 41;
*dMax.Begin() = 50;
ASSERT_FALSE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
// filter <=40 vs block 40-50
*dMin.Begin() = 40;
*dMax.Begin() = 50;
ASSERT_TRUE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
// filter <=40 vs block 39-50
*dMin.Begin() = 39;
*dMax.Begin() = 50;
ASSERT_TRUE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
// filter <=40 vs block 30-40
*dMin.Begin() = 30;
*dMax.Begin() = 40;
ASSERT_TRUE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
// filter <=40 vs block 1-4
*dMin.Begin() = 1;
*dMax.Begin() = 4;
ASSERT_TRUE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
///
SetDefault ();
tOpt.m_iMinValue = 15;
tOpt.m_bOpenRight = true;
tFilter = sphCreateFilter ( tOpt, tCtx, sError, sWarning );
ASSERT_TRUE ( tFilter!=nullptr );
// filter >=15 vs block 10-14
*dMin.Begin() = 10;
*dMax.Begin() = 14;
ASSERT_FALSE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
// filter >=15 vs block 10-15
*dMin.Begin() = 10;
*dMax.Begin() = 15;
ASSERT_TRUE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
// filter >=15 vs block 10-16
*dMin.Begin() = 10;
*dMax.Begin() = 16;
ASSERT_TRUE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
// filter >=15 vs block 55-65
*dMin.Begin() = 55;
*dMax.Begin() = 65;
ASSERT_TRUE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
// filter >=15 vs block 1-5
*dMin.Begin() = 1;
*dMax.Begin() = 5;
ASSERT_FALSE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
SetDefault ();
tOpt.m_iMinValue = 10;
tOpt.m_iMaxValue = 40;
tOpt.m_bHasEqualMin = false;
tOpt.m_bHasEqualMax = false;
tFilter = sphCreateFilter ( tOpt, tCtx, sError, sWarning );
ASSERT_TRUE ( tFilter!=nullptr );
// filter >10 && <40 vs block 1-5
*dMin.Begin() = 1;
*dMax.Begin() = 5;
ASSERT_FALSE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
// filter >10 && <40 vs block 1-10
*dMax.Begin() = 10;
ASSERT_FALSE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
// filter >10 && <40 vs block 40-50
*dMin.Begin() = 40;
*dMax.Begin() = 50;
ASSERT_FALSE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
// filter >10 && <40 vs block 41-50
*dMin.Begin() = 41;
ASSERT_FALSE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
// filter >10 && <40 vs block 39-50
*dMin.Begin() = 39;
ASSERT_TRUE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
// filter >10 && <40 vs block 9-41
*dMin.Begin() = 9;
*dMax.Begin() = 41;
ASSERT_TRUE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
///
SetDefault ();
tOpt.m_iMaxValue = 40;
tOpt.m_bOpenLeft = true;
tOpt.m_bHasEqualMax = false;
tOpt.m_bHasEqualMin = false;
tFilter = sphCreateFilter ( tOpt, tCtx, sError, sWarning );
ASSERT_TRUE ( tFilter!=nullptr );
// filter <40 vs block 40-50
*dMin.Begin() = 40;
*dMax.Begin() = 50;
ASSERT_FALSE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
// filter <40 vs block 41-50
*dMin.Begin() = 41;
*dMax.Begin() = 50;
ASSERT_FALSE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
// filter <40 vs block 39-50
*dMin.Begin() = 39;
*dMax.Begin() = 50;
ASSERT_TRUE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
// filter <40 vs block 30-40
*dMin.Begin() = 30;
*dMax.Begin() = 40;
ASSERT_TRUE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
// filter <40 vs block 1-4
*dMin.Begin() = 1;
*dMax.Begin() = 4;
ASSERT_TRUE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
///
SetDefault ();
tOpt.m_iMinValue = 15;
tOpt.m_bOpenRight = true;
tOpt.m_bHasEqualMax = false;
tOpt.m_bHasEqualMin = false;
tFilter = sphCreateFilter ( tOpt, tCtx, sError, sWarning );
ASSERT_TRUE ( tFilter!=nullptr );
// filter >15 vs block 10-15
*dMin.Begin() = 10;
*dMax.Begin() = 15;
ASSERT_FALSE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
// filter >15 vs block 10-16
*dMin.Begin() = 10;
*dMax.Begin() = 16;
ASSERT_TRUE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
// filter >15 vs block 55-65
*dMin.Begin() = 55;
*dMax.Begin() = 65;
ASSERT_TRUE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
// filter >15 vs block 1-5
*dMin.Begin() = 1;
*dMax.Begin() = 5;
ASSERT_FALSE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
}
TEST_F ( filter_block_level, range_float )
{
CSphString sWarning, sError;
CSphSchema tSchema;
CSphColumnInfo tCol;
CSphFixedVector<DWORD> dMin ( DWSIZEOF(DocID_t) + 1 ), dMax ( DWSIZEOF(DocID_t) + 1 );
std::unique_ptr<ISphFilter> tFilter;
tCol.m_eAttrType = SPH_ATTR_FLOAT;
tCol.m_sName = "gid";
tSchema.AddAttr ( tCol, false );
tOpt.m_eType = SPH_FILTER_FLOATRANGE;
tOpt.m_fMinValue = 10.0f;
tOpt.m_fMaxValue = 40.0f;
tOpt.m_bHasEqualMin = false;
tOpt.m_bHasEqualMax = false;
tCtx.m_pMatchSchema = &tSchema;
tFilter = sphCreateFilter ( tOpt, tCtx, sError, sWarning );
ASSERT_TRUE ( tFilter!=nullptr );
// filter >10 && <40 vs block 1-5
*dMin.Begin() = sphF2DW ( 1.0f );
*dMax.Begin() = sphF2DW ( 5.0f );
ASSERT_FALSE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
// filter >10 && <40 vs block 1-10.1
*dMax.Begin() = sphF2DW ( 10.1f );
ASSERT_TRUE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
// filter >10 && <40 vs block 39.9-50
*dMin.Begin() = sphF2DW ( 39.9f );
*dMax.Begin() = sphF2DW ( 50.0f );
ASSERT_TRUE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
// filter >10 && <40 vs block 40-50
*dMin.Begin() = sphF2DW ( 40.0f );
ASSERT_FALSE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
// filter >10 && <40 vs block 9-41
*dMin.Begin() = sphF2DW ( 9.0f );
*dMax.Begin() = sphF2DW ( 41.0f );
ASSERT_TRUE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
///
SetDefault ();
tOpt.m_eType = SPH_FILTER_FLOATRANGE;
tOpt.m_fMinValue = 0.0f;
tOpt.m_fMaxValue = 40.0f;
tOpt.m_bOpenLeft = true;
tOpt.m_bHasEqualMin = false;
tOpt.m_bHasEqualMax = false;
tFilter = sphCreateFilter ( tOpt, tCtx, sError, sWarning );
ASSERT_TRUE ( tFilter!=nullptr );
// filter <40 vs block 40-50
*dMin.Begin() = sphF2DW ( 40.0f );
*dMax.Begin() = sphF2DW ( 50.0f );
ASSERT_FALSE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
// filter <40 vs block 39.9-50
*dMin.Begin() = sphF2DW ( 39.9f );
*dMax.Begin() = sphF2DW ( 50.0f );
ASSERT_TRUE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
// filter <40 vs block 30-40
*dMin.Begin() = sphF2DW ( 30.0f );
*dMax.Begin() = sphF2DW ( 40.0f );
ASSERT_TRUE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
// filter <40 vs block 1-4
*dMin.Begin() = sphF2DW ( 1.0f );
*dMax.Begin() = sphF2DW ( 4.0f );
ASSERT_TRUE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
tOpt.m_fMinValue = 30.0f;
tOpt.m_fMaxValue = 40.0f;
tOpt.m_bOpenLeft = true; // FIXME!!! OpenLeft should work for FLOAT range too
tOpt.m_bHasEqualMin = false;
tOpt.m_bHasEqualMax = false;
tFilter = sphCreateFilter ( tOpt, tCtx, sError, sWarning );
ASSERT_TRUE ( tFilter != nullptr );
// filter <40 vs block 1-4
*dMin.Begin() = sphF2DW ( 1.0f );
*dMax.Begin() = sphF2DW ( 4.0f );
ASSERT_FALSE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) ); // FIXME!!! should be TRUE due to m_bOpenLeft option
///
SetDefault ();
tOpt.m_eType = SPH_FILTER_FLOATRANGE;
tOpt.m_fMinValue = 15.0f;
tOpt.m_fMaxValue = 100.0f;
tOpt.m_bOpenRight = true;
tOpt.m_bHasEqualMin = false;
tOpt.m_bHasEqualMax = false;
tFilter = sphCreateFilter ( tOpt, tCtx, sError, sWarning );
ASSERT_TRUE ( tFilter != nullptr );
// filter >15 vs block 10-15
*dMin.Begin() = sphF2DW ( 10.0f );
*dMax.Begin() = sphF2DW ( 15.0f );
ASSERT_FALSE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
// filter >15 vs block 10-16
*dMin.Begin() = sphF2DW ( 10.0f );
*dMax.Begin() = sphF2DW ( 16.0f );
ASSERT_TRUE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
// filter >15 vs block 55-65
*dMin.Begin() = sphF2DW ( 55.0f );
*dMax.Begin() = sphF2DW ( 65.0f );
ASSERT_TRUE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
// filter >15 vs block 1-5
*dMin.Begin() = sphF2DW ( 1.0f );
*dMax.Begin() = sphF2DW ( 5.0f );
ASSERT_FALSE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
// filter >15 vs block 150-200
*dMin.Begin() = sphF2DW ( 150.0f );
*dMax.Begin() = sphF2DW ( 200.0f );
ASSERT_FALSE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) ); // FIXME!!! should be TRUE due to m_bOpenRight option
}
TEST_F ( filter_block_level, values )
{
CSphString sWarning, sError;
CSphSchema tSchema;
CSphColumnInfo tCol;
CSphFixedVector<DWORD> dMin ( DWSIZEOF(DocID_t) + 1 ), dMax ( DWSIZEOF(DocID_t) + 1 );
std::unique_ptr<ISphFilter> tFilter;
tCol.m_eAttrType = SPH_ATTR_INTEGER;
tCol.m_sName = "gid";
tSchema.AddAttr ( tCol, false );
tOpt.m_eType = SPH_FILTER_VALUES;
SphAttr_t dValues[] = { 10, 40, 100 };
tOpt.SetExternalValues ( { dValues, sizeof ( dValues ) / sizeof ( dValues[0] ) } );
tCtx.m_pMatchSchema = &tSchema;
tFilter = sphCreateFilter ( tOpt, tCtx, sError, sWarning );
ASSERT_TRUE ( tFilter != nullptr );
// filter values vs block 1-9
*dMin.Begin() = 1;
*dMax.Begin() = 9;
ASSERT_FALSE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
// filter values vs block 11-39
*dMin.Begin() = 11;
*dMax.Begin() = 39;
ASSERT_FALSE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
// filter values vs block 9-101
*dMin.Begin() = 9;
*dMax.Begin() = 101;
ASSERT_TRUE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
// filter values vs block 41-101
*dMin.Begin() = 41;
*dMax.Begin() = 101;
ASSERT_TRUE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
/// single value
SetDefault ();
tOpt.m_eType = SPH_FILTER_VALUES;
SphAttr_t dValuesSingle[] = { 10 };
tOpt.SetExternalValues ( { dValuesSingle, sizeof ( dValuesSingle ) / sizeof ( dValuesSingle[0] ) } );
tFilter = sphCreateFilter ( tOpt, tCtx, sError, sWarning );
ASSERT_TRUE ( tFilter != nullptr );
// filter values vs block 1-9
*dMin.Begin() = 1;
*dMax.Begin() = 9;
ASSERT_FALSE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
// filter values vs block 11-39
*dMin.Begin() = 11;
*dMax.Begin() = 39;
ASSERT_FALSE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
// filter values vs block 9-101
*dMin.Begin() = 9;
*dMax.Begin() = 11;
ASSERT_TRUE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
}
TEST_F ( filter_block_level, and2 )
{
CSphString sWarning, sError;
CSphSchema tSchema;
CSphColumnInfo tCol;
CSphFixedVector<DWORD> dMin ( DWSIZEOF(DocID_t) + 1 ), dMax ( DWSIZEOF(DocID_t) + 1 );
std::unique_ptr<ISphFilter> tFilter;
tCol.m_eAttrType = SPH_ATTR_INTEGER;
tCol.m_sName = "gid";
tSchema.AddAttr ( tCol, false );
tOpt.m_eType = SPH_FILTER_VALUES;
SphAttr_t dVal1[] = { 10 };
tOpt.SetExternalValues ( { dVal1, sizeof ( dVal1 ) / sizeof ( dVal1[0] ) } );
tCtx.m_pMatchSchema = &tSchema;
auto pFilter1 = sphCreateFilter ( tOpt, tCtx, sError, sWarning );
ASSERT_TRUE ( pFilter1!=nullptr );
SphAttr_t dVal2[] = { 20 };
tOpt.SetExternalValues ( { dVal2, sizeof ( dVal2 ) / sizeof ( dVal2[0] ) } );
auto pFilter2 = sphCreateFilter ( tOpt, tCtx, sError, sWarning );
ASSERT_TRUE ( pFilter2!=nullptr );
tFilter = sphJoinFilters ( std::move ( pFilter1 ), std::move ( pFilter2 ) );
ASSERT_TRUE ( tFilter != nullptr );
// filter values vs block 5-9
*dMin.Begin() = 5;
*dMax.Begin() = 9;
ASSERT_FALSE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
// filter values vs block 11-29
*dMin.Begin() = 11;
*dMax.Begin() = 29;
ASSERT_FALSE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
// filter values vs block 9-21
*dMin.Begin() = 9;
*dMax.Begin() = 21;
ASSERT_TRUE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
}
TEST_F ( filter_block_level, and3 )
{
CSphString sWarning, sError;
CSphSchema tSchema;
CSphColumnInfo tCol;
CSphFixedVector<DWORD> dMin ( DWSIZEOF(DocID_t) + 1 ), dMax ( DWSIZEOF(DocID_t) + 1 );
std::unique_ptr<ISphFilter> tFilter;
tCol.m_eAttrType = SPH_ATTR_INTEGER;
tCol.m_sName = "gid";
tSchema.AddAttr ( tCol, false );
tOpt.m_eType = SPH_FILTER_VALUES;
SphAttr_t dVal1[] = { 10 };
tOpt.SetExternalValues ( { dVal1, sizeof ( dVal1 ) / sizeof ( dVal1[0] ) } );
tCtx.m_pMatchSchema = &tSchema;
auto pFilter1 = sphCreateFilter ( tOpt, tCtx, sError, sWarning );
ASSERT_TRUE ( pFilter1!=nullptr );
SphAttr_t dVal2[] = { 15 };
tOpt.SetExternalValues ( { dVal2, sizeof ( dVal2 ) / sizeof ( dVal2[0] ) } );
auto pFilter2 = sphCreateFilter ( tOpt, tCtx, sError, sWarning );
ASSERT_TRUE ( pFilter2!=nullptr );
SphAttr_t dVal3[] = { 20 };
tOpt.SetExternalValues ( { dVal3, sizeof ( dVal3 ) / sizeof ( dVal3[0] ) } );
auto pFilter3 = sphCreateFilter ( tOpt, tCtx, sError, sWarning );
ASSERT_TRUE ( pFilter3!=nullptr );
tFilter = sphJoinFilters ( std::move ( pFilter1 ), sphJoinFilters ( std::move ( pFilter2 ), std::move ( pFilter3 ) ) );
ASSERT_TRUE ( tFilter != nullptr );
// filter values vs block 11-14
*dMin.Begin() = 11;
*dMax.Begin() = 14;
ASSERT_FALSE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
// filter values vs block 9-16
*dMin.Begin() = 9;
*dMax.Begin() = 16;
ASSERT_FALSE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
// filter values vs block 5-25
*dMin.Begin() = 5;
*dMax.Begin() = 25;
ASSERT_TRUE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
}
TEST_F ( filter_block_level, and )
{
CSphString sWarning, sError;
CSphSchema tSchema;
CSphColumnInfo tCol;
CSphFixedVector<DWORD> dMin ( DWSIZEOF(DocID_t) + 1 ), dMax ( DWSIZEOF(DocID_t) + 1 );
std::unique_ptr<ISphFilter> tFilter;
tCol.m_eAttrType = SPH_ATTR_INTEGER;
tCol.m_sName = "gid";
tSchema.AddAttr ( tCol, false );
tOpt.m_eType = SPH_FILTER_VALUES;
SphAttr_t dVal1[] = { 10 };
tOpt.SetExternalValues ( { dVal1, sizeof ( dVal1 ) / sizeof ( dVal1[0] ) } );
tCtx.m_pMatchSchema = &tSchema;
auto pFilter1 = sphCreateFilter ( tOpt, tCtx, sError, sWarning );
ASSERT_TRUE ( pFilter1!=nullptr );
SphAttr_t dVal2[] = { 14 };
tOpt.SetExternalValues ( { dVal2, sizeof ( dVal2 ) / sizeof ( dVal2[0] ) } );
auto pFilter2 = sphCreateFilter ( tOpt, tCtx, sError, sWarning );
ASSERT_TRUE ( pFilter2!= nullptr );
SphAttr_t dVal3[] = { 18 };
tOpt.SetExternalValues ( { dVal3, sizeof ( dVal3 ) / sizeof ( dVal3[0] ) } );
auto pFilter3 = sphCreateFilter ( tOpt, tCtx, sError, sWarning );
ASSERT_TRUE ( pFilter3!= nullptr );
SphAttr_t dVal4[] = { 20 };
tOpt.SetExternalValues ( { dVal4, sizeof ( dVal4 ) / sizeof ( dVal4[0] ) } );
auto pFilter4 = sphCreateFilter ( tOpt, tCtx, sError, sWarning );
ASSERT_TRUE ( pFilter4!= nullptr );
tFilter = sphJoinFilters ( std::move ( pFilter1 ), sphJoinFilters ( std::move ( pFilter2 ), sphJoinFilters ( std::move ( pFilter3 ), std::move ( pFilter4 ) ) ) );
ASSERT_TRUE ( tFilter != nullptr );
// filter values vs block 5-19
*dMin.Begin() = 5;
*dMax.Begin() = 19;
ASSERT_FALSE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
// filter values vs block 1-30
*dMin.Begin() = 1;
*dMax.Begin() = 30;
ASSERT_TRUE ( tFilter->EvalBlock ( dMin.Begin(), dMax.Begin() ) );
}
| 17,543
|
C++
|
.cpp
| 469
| 35.200426
| 163
| 0.663245
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
16,992
|
gtests_strfmt.cpp
|
manticoresoftware_manticoresearch/src/gtests/gtests_strfmt.cpp
|
//
// Copyright (c) 2022-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org
//
#include <gtest/gtest.h>
#include "std/ints.h"
#include "std/num_conv.h"
#include "sphinxutils.h"
#include "sphinxjson.h"
#include "aggrexpr.h"
TEST ( functions, NtoA )
{
using namespace sph;
char sBuf[50];
memset ( sBuf, 255, 50 );
int iLen = NtoA ( sBuf, (DWORD)50 );
sBuf[iLen] = '\0';
ASSERT_STREQ ( "50", sBuf );
iLen = NtoA ( sBuf, 50, 10, 0, 4 );
sBuf[iLen] = '\0';
ASSERT_STREQ ( "0050", sBuf );
iLen = NtoA ( sBuf, 50, 10, 4 );
sBuf[iLen] = '\0';
ASSERT_STREQ ( " 50", sBuf );
iLen = NtoA ( sBuf, 50, 10, 6, 3 );
sBuf[iLen] = '\0';
ASSERT_STREQ ( " 050", sBuf );
iLen = NtoA ( sBuf, 50, 10, 6, 3, '_' );
sBuf[iLen] = '\0';
ASSERT_STREQ ( "___050", sBuf );
iLen = NtoA<int64_t> ( sBuf, 0xFFFFFFFFFFFFFFFFll );
sBuf[iLen] = '\0';
ASSERT_STREQ ( "-1", sBuf );
iLen = NtoA<int64_t> ( sBuf, 0x8000000000000000ll );
sBuf[iLen] = '\0';
ASSERT_STREQ ( "-9223372036854775808", sBuf );
iLen = NtoA ( sBuf, 0x7FFFFFFFFFFFFFFFll );
sBuf[iLen] = '\0';
ASSERT_STREQ ( "9223372036854775807", sBuf );
iLen = NtoA ( sBuf, -9223372036854775807 );
sBuf[iLen] = '\0';
ASSERT_STREQ ( "-9223372036854775807", sBuf );
sBuf[NtoA ( sBuf, -9223372036854775807 )] = '\0';
ASSERT_STREQ ( "-9223372036854775807", sBuf );
iLen = NtoA ( sBuf, 9223372036854775807 );
sBuf[iLen] = '\0';
ASSERT_STREQ ( "9223372036854775807", sBuf );
iLen = NtoA<int64_t> ( sBuf, 0xFFFFFFFFFFFFFFFFll, 16 );
sBuf[iLen] = '\0';
ASSERT_STREQ ( "-1", sBuf );
iLen = NtoA<int64_t> ( sBuf, 0x8000000000000000ll, 16 );
sBuf[iLen] = '\0';
ASSERT_STREQ ( "-8000000000000000", sBuf );
}
// Verify sph::IFtoA: render an integer as a fixed-point decimal with the
// given number of fractional digits (50000 with 3 places -> "50.000").
// Like NtoA, the result is not NUL-terminated by the callee.
TEST ( functions, IFtoA )
{
	using namespace sph;
	char sBuf[50];
	memset ( sBuf, 255, 50 ); // poison the buffer to catch missing termination
	int iLen = IFtoA ( sBuf, 50000, 3 );
	sBuf[iLen] = '\0';
	ASSERT_STREQ ( "50.000", sBuf );
	iLen = IFtoA ( sBuf, -50000, 3 );
	sBuf[iLen] = '\0';
	ASSERT_STREQ ( "-50.000", sBuf );
	iLen = IFtoA ( sBuf, -1, 3 ); // fraction-only negative value keeps its sign
	sBuf[iLen] = '\0';
	ASSERT_STREQ ( "-0.001", sBuf );
	iLen = IFtoA ( sBuf, 1, 3 );
	sBuf[iLen] = '\0';
	ASSERT_STREQ ( "0.001", sBuf );
}
// Render iNum with sph::Sprintf using format sFmt and assert the output
// equals sResult; the format string is echoed on failure for diagnostics.
void test_mysprintf ( const char* sFmt, int64_t iNum, const char* sResult )
{
	using namespace sph;
	char sBuf[50];
	memset ( sBuf, 255, 50 ); // poison, so an unterminated result fails loudly
	sph::Sprintf ( sBuf, sFmt, iNum );
	ASSERT_STREQ ( sBuf, sResult ) << " (on fmt " << sFmt << ")";
}
// Compare sph::Sprintf output against libc snprintf as the reference for the
// same format string. sFmt is only ever an int-sized "%d"-family conversion
// here (see test_sphintf_for), so the value is handed to snprintf as a plain
// int: passing an int64_t through a "%d" specifier in a varargs call is
// undefined behavior, even though the test values always fit in an int.
void test_sprintf ( const char* sFmt, int64_t iNum )
{
	char sBuf[50];
	snprintf ( sBuf, sizeof ( sBuf ), sFmt, (int)iNum );
	test_mysprintf ( sFmt, iNum, sBuf );
}
// Run the snprintf-vs-sph::Sprintf comparison for every supported flavour of
// the "%d" conversion: plain, zero-fill, width, precision, and combinations.
void test_sphintf_for ( int64_t iNum )
{
	static const char * dFormats[] =
		{ "%d", "%0d", "%4d", "%04d", "%.4d", "%0.4d", "%9.3d", "%09.3d" };
	for ( const char * szFmt : dFormats )
		test_sprintf ( szFmt, iNum );
}
// Exercise sph::Sprintf into a char buffer: std-like %d conversions (verified
// against snprintf via test_sphintf_for) plus the custom conversions:
// %l (signed 64-bit), %U (unsigned 64-bit), %D/%F (fixed-point).
// NOTE: space-padded expected strings were corrupted (whitespace runs
// collapsed); they are restored here to match the requested widths.
TEST ( functions, sph_Sprintf )
{
	test_sphintf_for ( 0 );
	test_sphintf_for ( 50 );
	test_sphintf_for ( -50 );
	test_sphintf_for ( 10000 );
	test_sphintf_for ( -10000 );
	int iNum = -10000;
	test_mysprintf ( "%l", iNum, "-10000" ); // %l is our specific for 64-bit signed
	test_mysprintf ( "%0l", iNum, "-10000" );
	test_mysprintf ( "%4l", iNum, "-10000" );
	test_mysprintf ( "%04l", iNum, "-10000" );
	test_mysprintf ( "%.4l", iNum, "-10000" );
	test_mysprintf ( "%0.4l", iNum, "-10000" );
	test_mysprintf ( "%9.3l", iNum, "   -10000" ); // width 9 -> 3 leading spaces
	test_mysprintf ( "%09.3l", iNum, "   -10000" ); // '0' flag is ignored when a precision is given
	test_mysprintf ( "%l", -100000000000000, "-100000000000000" ); // %l is our specific for 64-bit signed
	// my own fixed-point nums
	test_mysprintf ( "%.3D", iNum, "-10.000" );
	test_mysprintf ( "%.9D", iNum, "-0.000010000" );
	test_mysprintf ( "%.3F", iNum, "-10.000" );
	test_mysprintf ( "%.5F", iNum, "-0.10000" );
	iNum = 10000;
	test_mysprintf ( "%U", iNum, "10000" ); // %U is our specific for 64-bit unsigned
	test_mysprintf ( "%0U", iNum, "10000" );
	test_mysprintf ( "%4U", iNum, "10000" );
	test_mysprintf ( "%04U", iNum, "10000" );
	test_mysprintf ( "%.4U", iNum, "10000" );
	test_mysprintf ( "%0.4U", iNum, "10000" );
	test_mysprintf ( "%9.3U", iNum, "    10000" );
	test_mysprintf ( "%09.3U", iNum, "    10000" );
	// fallback to standard %f
	using namespace sph;
	char sBuf[50];
	memset ( sBuf, 255, 50 );
	sph::Sprintf ( sBuf, "%03.2f", 99.9911 );
	ASSERT_STREQ ( sBuf, "99.99" );
	// strings output: width pads with spaces, '-' left-aligns, precision truncates
	sph::Sprintf ( sBuf, "%s", "hello" );
	ASSERT_STREQ ( sBuf, "hello" );
	sph::Sprintf ( sBuf, "%-s", "hello" );
	ASSERT_STREQ ( sBuf, "hello" );
	sph::Sprintf ( sBuf, "%10s", "hello" );
	ASSERT_STREQ ( sBuf, "     hello" );
	sph::Sprintf ( sBuf, "%-10s", "hello" );
	ASSERT_STREQ ( sBuf, "hello     " );
	sph::Sprintf ( sBuf, "%-10.3s", "hello" );
	ASSERT_STREQ ( sBuf, "hel       " );
	sph::Sprintf ( sBuf, "%10.3s", "hello" );
	ASSERT_STREQ ( sBuf, "       hel" );
	sph::Sprintf ( sBuf, "Hello %l, %d world!", -100000000000000, -2000000000 );
	ASSERT_STREQ ( sBuf, "Hello -100000000000000, -2000000000 world!" );
	// std 64 fmt modifiers
	iNum = 10000;
	test_mysprintf ( "test " INT64_FMT, iNum, "test 10000" );
	test_mysprintf ( "test " UINT64_FMT, iNum, "test 10000" );
}
// sph::Sprintf into StringBuilder_c: output is appended (Clear() resets),
// the builder grows as needed, and the custom %D/%F conversions work there
// too. StartBlock/FinishBlock wrap appended pieces with the JSON delimiters.
// NOTE: space-padded expected strings were corrupted (whitespace runs
// collapsed); they are restored here to match the requested widths.
TEST ( functions, sph_Sprintf_to_builder )
{
	using namespace sph;
	StringBuilder_c sBuf;
	Sprintf ( sBuf, "%-10s", "hello" );
	EXPECT_STREQ ( sBuf.cstr(), "hello     " );
	sBuf.Clear();
	Sprintf ( sBuf, "%03.2f", 99.9911 );
	EXPECT_STREQ ( sBuf.cstr(), "99.99" );
	sBuf.Clear();
	sph::Sprintf ( sBuf, "Hello %d, %l world!", -2000000000, -100000000000000 );
	ASSERT_STREQ ( sBuf.cstr(), "Hello -2000000000, -100000000000000 world!" );
	sph::Sprintf ( sBuf, "Hi!" ); // appends, does not overwrite
	ASSERT_STREQ ( sBuf.cstr(), "Hello -2000000000, -100000000000000 world!Hi!" );
	sBuf.Clear();
	Sprintf ( sBuf, "%09.3d", -10000 );
	EXPECT_STREQ ( sBuf.cstr(), "   -10000" ); // '0' flag ignored when precision is given
	sBuf.Clear();
	Sprintf ( sBuf, "%.3D", (int64_t)-10000 );
	EXPECT_STREQ ( sBuf.cstr(), "-10.000" );
	sBuf.Clear();
	Sprintf ( sBuf, "%.9D", -10000ll );
	ASSERT_STREQ ( sBuf.cstr(), "-0.000010000" );
	sBuf.Clear();
	sBuf.StartBlock ( dJsonObj );
	sBuf.Sprintf ( "%d %d %d", 1, -1, 100 );
	sBuf.Sprintf ( "%d %d %d", 2, -2, 200 );
	sBuf.FinishBlock();
	ASSERT_STREQ ( sBuf.cstr(), "{1 -1 100,2 -2 200}" );
	sBuf.Sprintf ( " %.3F, %.6F", 999500, -1400932 );
	ASSERT_STREQ ( sBuf.cstr(), "{1 -1 100,2 -2 200} 999.500, -1.400932" );
	sBuf.Sprintf ( " %.3F", 999005 );
	ASSERT_STREQ ( sBuf.cstr(), "{1 -1 100,2 -2 200} 999.500, -1.400932 999.005" );
}
// One case per conversion specifier supported by StringBuilder_c::Sprintf.
TEST ( functions, builder_sprintf_formatters )
{
	using namespace sph;
	StringBuilder_c sBuf;
	// %s - string
	Sprintf ( sBuf, "%s", "hello" );
	EXPECT_STREQ ( sBuf.cstr(), "hello" );
	// %p - pointer (nullptr renders as "0")
	sBuf.Clear();
	sBuf.Sprintf ( "%p", nullptr );
	EXPECT_STREQ ( sBuf.cstr(), "0" );
	// %x - hex unsigned integer
	sBuf.Clear();
	sBuf.Sprintf ( "%x", 0xFFFF );
	EXPECT_STREQ ( sBuf.cstr(), "ffff" );
	// %u - unsigned integer
	sBuf.Clear();
	sBuf.Sprintf ( "%u", 0xFFFF );
	EXPECT_STREQ ( sBuf.cstr(), "65535" );
	// %d - decimal integer
	sBuf.Clear();
	sBuf.Sprintf ( "%d", -2 );
	EXPECT_STREQ ( sBuf.cstr(), "-2" );
	// %i - ignore value (consumes the argument, emits nothing)
	sBuf.Clear();
	sBuf.Sprintf ( "hello%i world", -2 );
	EXPECT_STREQ ( sBuf.cstr(), "hello world" );
	// %l - decimal int64
	sBuf.Clear();
	sBuf.Sprintf ( "%l", (int64_t)-1 );
	EXPECT_STREQ ( sBuf.cstr(), "-1" );
	// %U - decimal uint64
	sBuf.Clear();
	sBuf.Sprintf ( "%U", (uint64_t)4294967295UL );
	EXPECT_STREQ ( sBuf.cstr(), "4294967295" );
	// %D - fixed-point signed 64 bit
	sBuf.Clear();
	Sprintf ( sBuf, "%.3D", (int64_t)-10000 );
	EXPECT_STREQ ( sBuf.cstr(), "-10.000" );
	// %F - fixed-point signed 32 bit
	sBuf.Clear();
	sBuf.Sprintf ( "%.3F", 999005 );
	EXPECT_STREQ ( sBuf.cstr(), "999.005" );
	// %t - timespan in int64 useconds
	sBuf.Clear();
	sBuf.Sprintf ( "%t", (int64_t)1000065 );
	EXPECT_STREQ ( sBuf.cstr(), "1s" );
	// %c - single char
	sBuf.Clear();
	sBuf.Sprintf ( "char %c is here", 'a' );
	EXPECT_STREQ ( sBuf.cstr(), "char a is here" );
}
// Regression: %F into a freshly-constructed (empty) builder must work and
// render the fixed-point value with its leading zero.
TEST ( functions, sph_Sprintf_regression_on_empty_buf )
{
	StringBuilder_c sBuf;
	sBuf.Sprintf ( "%.3F", 10 );
	ASSERT_STREQ ( sBuf.cstr(), "0.010" );
}
// %t renders an int64 microsecond count using the largest unit that fits
// exactly: us, ms, s, m, h, d, w.
TEST ( functions, sph_Sprintf_inttimespans )
{
	StringBuilder_c sBuf;
	static const struct
	{
		int64_t tm;       // input timespan, microseconds
		const char* res;  // expected rendering
	} models[] =
	{
		{ 4, "4us" },
		{ 5000, "5ms" },
		{ 6000000, "6s" },
		{ 120000000, "2m" },
		{ 3600000000, "1h" },
		{ 3600000000ULL * 24 * 2, "2d" },
		{ 3600000000ULL * 24 * 7 * 2, "2w" },
	};
	for ( const auto& model : models )
	{
		sBuf.Sprintf ( "%t", model.tm );
		EXPECT_STREQ ( sBuf.cstr(), model.res ) << "for " << model.tm << " with %t";
		sBuf.Clear();
	}
}
// %.Nt renders a timespan with N digits of sub-unit precision: the value is
// rounded (half up, see the 1999995/89999995 rows) and, once the precision
// exceeds what a fraction can express, spills into smaller units.
TEST ( functions, sph_Sprintf_fractimespans_round )
{
	StringBuilder_c sBuf;
	static const struct
	{
		int64_t tm;       // input timespan, microseconds
		const char* fmt;  // format with precision
		const char* res;  // expected rendering
	} models[] =
	{
		// us rounding
		{ 999, "%t", "999us" },
		{ 999, "%.1t", "999us" },
		{ 999, "%.2t", "999us" },
		{ 999, "%.3t", "999us" },
		// ms rounding
		{ 1559, "%t", "2ms" },
		{ 1559, "%.1t", "1.6ms" },
		{ 1559, "%.2t", "1.56ms" },
		{ 1559, "%.3t", "1ms 559us" },
		// s rounding
		{ 1555555, "%t", "2s" },
		{ 1555555, "%.1t", "1.6s" },
		{ 1555555, "%.2t", "1.56s" },
		{ 1555555, "%.3t", "1s 556ms" },
		{ 1555555, "%.4t", "1s 555.6ms" },
		{ 1555555, "%.5t", "1s 555.56ms" },
		{ 1999995, "%.5t", "2s" },
		{ 1555555, "%.6t", "1s 555ms 555us" },
		// m rounding
		{ 71555555, "%t", "1m" },
		{ 71555555, "%.1t", "1.2m" },
		{ 71555555, "%.2t", "1m 12s" },
		{ 71555555, "%.3t", "1m 11.6s" },
		{ 71555555, "%.4t", "1m 11.56s" },
		{ 71555555, "%.5t", "1m 11s 556ms" },
		{ 71555555, "%.6t", "1m 11s 555.6ms" },
		{ 71555555, "%.7t", "1m 11s 555.56ms" },
		{ 71555555, "%.8t", "1m 11s 555ms 555us" },
		{ 89999999, "%.7t", "1m 30s" },
		{ 89999994, "%.7t", "1m 29s 999.99ms" },
		{ 89999995, "%.7t", "1m 30s" },
		{ 90999999, "%.7t", "1m 31s" },
	};
	for ( const auto& model : models )
	{
		sBuf.Sprintf ( model.fmt, model.tm );
		EXPECT_STREQ ( sBuf.cstr(), model.res ) << "for " << model.tm << " with " << model.fmt;
		sBuf.Clear();
	}
}
// A zero timespan renders as "0us" regardless of the requested precision.
TEST ( functions, sph_Sprintf_fractimezero )
{
	StringBuilder_c sBuf;
	sBuf.Sprintf ( "%t", 0ULL );
	ASSERT_STREQ ( sBuf.cstr(), "0us" );
	sBuf.Clear();
	sBuf.Sprintf ( "%.3t", 0ULL );
	ASSERT_STREQ ( sBuf.cstr(), "0us" );
	sBuf.Clear();
}
// GetUTC() parses partial ISO-8601-style dates (missing parts default to the
// period start) as well as raw unix timestamps; FormatDate() renders back.
TEST ( functions, date_parse )
{
	int64_t iDate = 0;
	StringBuilder_c sBuf;
	sBuf.Clear();
	iDate = GetUTC ( "2019-03" );
	FormatDate ( iDate, sBuf );
	EXPECT_STREQ ( sBuf.cstr(), "2019-03-01T00:00:00" );
	sBuf.Clear();
	iDate = GetUTC ( "2019-03-23" );
	FormatDate ( iDate, sBuf );
	EXPECT_STREQ ( sBuf.cstr(), "2019-03-23T00:00:00" );
	sBuf.Clear();
	iDate = GetUTC ( "2019-03-23T21" );
	FormatDate ( iDate, sBuf );
	EXPECT_STREQ ( sBuf.cstr(), "2019-03-23T21:00:00" );
	sBuf.Clear();
	iDate = GetUTC ( "2019-03-23T21:34" );
	FormatDate ( iDate, sBuf );
	EXPECT_STREQ ( sBuf.cstr(), "2019-03-23T21:34:00" );
	sBuf.Clear();
	iDate = GetUTC ( "2019-03-23T21:34:46" );
	FormatDate ( iDate, sBuf );
	EXPECT_STREQ ( sBuf.cstr(), "2019-03-23T21:34:46" );
	sBuf.Clear();
	// fractional seconds are truncated
	iDate = GetUTC ( "2019-03-23T21:34:46.1234567" );
	FormatDate ( iDate, sBuf );
	EXPECT_STREQ ( sBuf.cstr(), "2019-03-23T21:34:46" );
	sBuf.Clear();
	// timezone offsets are accepted but do not shift the parsed time
	iDate = GetUTC ( "2019-03-23T21:34:46-04:00" );
	FormatDate ( iDate, sBuf );
	EXPECT_STREQ ( sBuf.cstr(), "2019-03-23T21:34:46" );
	sBuf.Clear();
	iDate = GetUTC ( "2019-03-23T21:34:46.123-04:00" );
	FormatDate ( iDate, sBuf );
	EXPECT_STREQ ( sBuf.cstr(), "2019-03-23T21:34:46" );
	sBuf.Clear();
	// a plain number is treated as a unix timestamp
	iDate = GetUTC ( "1553371205" );
	FormatDate ( iDate, sBuf );
	EXPECT_STREQ ( sBuf.cstr(), "2019-03-23T20:00:05" );
}
| 12,041
|
C++
|
.cpp
| 379
| 29.453826
| 103
| 0.606649
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,993
|
gtests_tokenizer.cpp
|
manticoresoftware_manticoresearch/src/gtests/gtests_tokenizer.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include <gtest/gtest.h>
#include "sphinxint.h"
#include "tokenizer/tokenizer.h"
#include "tokenizer/tok_internals.h"
// Miscellaneous tests of the tokenizer
// Flags tweaking how TokenizerGtest::CreateTestTokenizer() configures the tokenizer.
enum : DWORD
{
	TOK_EXCEPTIONS = 1, TOK_NO_DASH = 2, TOK_NO_SHORT = 4
};
// Shared test fixture paths/data, defined in the gtests main module.
extern const char * g_sTmpfile;
extern const char * g_sMagickTmpfile;
extern const char * g_sMagic;
// Base fixture that builds a query-mode tokenizer, optionally with
// exceptions/synonyms loaded (see the TOK_* flags above).
class TokenizerGtest : public ::testing::Test
{
protected:
	// Build a tokenizer according to uMode:
	//   TOK_NO_SHORT   - keep the default min word length instead of 2
	//   TOK_NO_DASH    - '-' is neither a word char nor a special; only '!' is special
	//   TOK_EXCEPTIONS - load synonyms/exceptions from g_sMagickTmpfile
	TokenizerRefPtr_c CreateTestTokenizer ( DWORD uMode )
	{
		StrVec_t dWarnings;
		CSphString sError;
		CSphTokenizerSettings tSettings;
		if ( !( uMode & TOK_NO_SHORT ) )
			tSettings.m_iMinWordLen = 2;
		TokenizerRefPtr_c pTokenizer = Tokenizer::Create ( tSettings, nullptr, nullptr, dWarnings, sError );
		if ( !( uMode & TOK_NO_DASH ) )
		{
			Verify ( pTokenizer->SetCaseFolding ( "-, 0..9, A..Z->a..z, _, a..z, U+80..U+FF", sError ) );
			pTokenizer->AddSpecials ( "!-" );
		} else
		{
			Verify ( pTokenizer->SetCaseFolding ( "0..9, A..Z->a..z, _, a..z, U+80..U+FF", sError ) );
			pTokenizer->AddSpecials ( "!" );
		}
		if ( uMode & TOK_EXCEPTIONS )
		{
			Verify ( pTokenizer->LoadSynonyms ( g_sMagickTmpfile, nullptr, dWarnings, sError ) );
		}
		// Order matters here!
		// we want to create a query mode tokenizer
		// the official way is to Clone() an indexing mode one, so we do that
		// however, Clone() adds backslash as a special
		// and that must be done *after* SetCaseFolding, otherwise it's not special any more
		return pTokenizer->Clone ( SPH_CLONE_QUERY );
	}
	TokenizerRefPtr_c m_pTokenizer;
	// convenience alias so tests may use either spelling
	TokenizerRefPtr_c& pTokenizer = m_pTokenizer;
	CSphString sError;
};
// Exceptions (synonyms) interacting with specials, whitespace, prefixes and
// blended chars, plus backslash-escaping in a query-mode clone.
TEST_F( TokenizerGtest, exceptions_more )
{
	m_pTokenizer = CreateTestTokenizer ( TOK_EXCEPTIONS | TOK_NO_SHORT );
	ASSERT_TRUE ( m_pTokenizer->SetBlendChars ( "+, U+23", sError ) );
	// each case: input string, expected tokens..., nullptr terminator
	const char * dTests[] =
	{ // for completeness...
		"AT&T!!!", "AT&T", "!", "!", "!", nullptr, // exceptions vs specials
		"U.S.AB U.S.A. U.S.B.U.S.D.U.S.U.S.A.F.", "US", "ab", "USA", "USB", "USD", "US", "USAF", nullptr,
		"Y.M.C.A.", "y", "m", "c", "a", nullptr,
		"B&E's", "b", "e", "s", nullptr,
		// exceptions vs spaces
		// NOTE(review): the three rows below are identical here; whitespace runs
		// inside the inputs look collapsed in transit - verify against upstream
		"AT & T", "AT & T", nullptr,
		"AT & T", "AT & T", nullptr,
		"AT & T", "AT & T", nullptr,
		"AT$&$T", "at", "t", nullptr,
		// prefix fun
		"U.S.A.X.", "USA", "x", nullptr,
		"U.X.U.S.A.", "u", "x", "USA", nullptr,
		// exceptions vs blended
		"#test this", "#test", "test", "this", nullptr,
		"#test this", "#test", "test", "this", nullptr,
		"test#that", "test#that", "test", "that", nullptr,
		"1+2", "1+2", "1", "2", nullptr,
		"te.st#this", "te", "st#this", "st", "this", nullptr,
		"U.boat", "u", "boat", nullptr,
		// regressions
		";foo bar", ";", "foo", "bar", nullptr,
		nullptr
	};
	for ( int iCur = 0; dTests[iCur]; )
	{
		m_pTokenizer->SetBuffer ( ( BYTE * ) dTests[iCur], (int) strlen ( dTests[iCur] ) );
		++iCur;
		for ( BYTE * pToken = m_pTokenizer->GetToken (); pToken; pToken = m_pTokenizer->GetToken () )
		{
			ASSERT_TRUE ( dTests[iCur] );
			ASSERT_STREQ ( ( const char * ) pToken, dTests[iCur] );
			++iCur;
		}
		ASSERT_FALSE ( dTests[iCur] ) << "Failed for " << iCur;
		++iCur;
	}
	// escaping in a query-mode clone: "life:)" must survive raw and escaped
	TokenizerRefPtr_c pQtok = m_pTokenizer->Clone ( SPH_CLONE_QUERY );
	pQtok->SetBuffer ( ( BYTE * ) "life:)", 6 );
	ASSERT_STREQ ( ( char * ) pQtok->GetToken (), "life:)" );
	ASSERT_FALSE ( pQtok->GetToken () );
	pQtok->SetBuffer ( ( BYTE * ) "life:\\)", 7 );
	ASSERT_STREQ( ( char * ) pQtok->GetToken (), "life:)" );
	ASSERT_FALSE ( pQtok->GetToken () );
}
// Chars that are both blended and special/plain: with "skip_pure" blend mode
// the purely-blended chunk "=-" yields no token at all.
TEST_F ( TokenizerGtest, special_blended )
{
	pTokenizer = CreateTestTokenizer ( TOK_NO_DASH );
	ASSERT_TRUE ( pTokenizer->SetBlendChars ( "., -", sError ) );
	pTokenizer->AddSpecials ( "-" );
	pTokenizer->AddPlainChars ( "=" );
	ASSERT_TRUE ( pTokenizer->SetBlendMode ( "trim_none, skip_pure", sError ) );
	char sTest10[] = "hello =- =world";
	pTokenizer->SetBuffer ( ( BYTE * ) sTest10, (int) strlen ( sTest10 ) );
	ASSERT_STREQ ( ( const char * ) pTokenizer->GetToken (), "hello" );
	ASSERT_STREQ ( ( const char * ) pTokenizer->GetToken (), "=world" );
}
// Case folding limited to Cyrillic only: Latin letters act as separators,
// so the 'X' (0x58) inside the Cyrillic word splits it in two tokens.
TEST_F ( TokenizerGtest, noascii_case )
{
	pTokenizer = Tokenizer::Detail::CreateUTF8Tokenizer ();
	ASSERT_TRUE (
		pTokenizer->SetCaseFolding ( "U+410..U+42F->U+430..U+44F, U+430..U+44F, U+401->U+451, U+451", sError ) );
	char sTest20[] = "abc \xD0\xBE\xD0\xBF\xD0\xB0\x58\xD1\x87\xD0\xB0 def"; // "abc опаXча def"
	pTokenizer->SetBuffer ( ( BYTE * ) sTest20, (int) strlen ( sTest20 ) );
	ASSERT_STREQ ( ( const char * ) pTokenizer->GetToken (), "\xD0\xBE\xD0\xBF\xD0\xB0" ); // "опа"
	ASSERT_STREQ ( ( const char * ) pTokenizer->GetToken (), "\xD1\x87\xD0\xB0" ); // "ча"
	ASSERT_FALSE ( pTokenizer->GetToken () );
}
// SetNgramChars() must reject a plain numeric range and accept proper
// codepoint ranges (with folding).
TEST_F ( TokenizerGtest, utf8_ngrams )
{
	pTokenizer = Tokenizer::Detail::CreateUTF8NgramTokenizer ();
	ASSERT_FALSE ( pTokenizer->SetNgramChars ( "2..4", sError ) ); // invalid spec
	ASSERT_TRUE ( pTokenizer->SetCaseFolding ( "0..9, A..Z->a..z, _, a..z", sError ) );
	ASSERT_TRUE ( pTokenizer->SetNgramChars ( "U+410..U+42F->U+430..U+44F, U+430..U+44F", sError ) );
}
// Round-trip 4-byte and 3-byte UTF-8 sequences through sphUTF8Decode /
// sphUTF8Encode and the SPH_UTF8_ENCODE macro, then check the tokenizer
// keeps a 4-byte codepoint inside a token.
TEST_F ( TokenizerGtest, utf8_4bytes_codepoints )
{
	BYTE sTest21[] = "\xF4\x80\x80\x80\x32\x34\x20"; // 4-byte codepoint, then "24 "
	BYTE sTest22[] = "\xEC\x97\xB0"; // 3-byte sequence
	BYTE sRes21[SPH_MAX_WORD_LEN];
	memset ( sRes21, 0, sizeof ( sRes21 ) );
	const BYTE * pTest21 = sTest21;
	int iCode21 = sphUTF8Decode ( pTest21 );
	ASSERT_EQ ( sphUTF8Encode ( sRes21, iCode21 ), 4 ); // re-encodes to 4 bytes
	ASSERT_EQ ( sTest21[0], sRes21[0] );
	ASSERT_EQ ( sTest21[1], sRes21[1] );
	ASSERT_EQ ( sTest21[2], sRes21[2] );
	ASSERT_EQ ( sTest21[3], sRes21[3] );
	// same round-trip via the encode macro
	memset ( sRes21, 0, sizeof ( sRes21 ) );
	BYTE * pRes21 = sRes21;
	SPH_UTF8_ENCODE ( pRes21, iCode21 );
	ASSERT_EQ ( sTest21[0], sRes21[0] );
	ASSERT_EQ ( sTest21[1], sRes21[1] );
	ASSERT_EQ ( sTest21[2], sRes21[2] );
	ASSERT_EQ ( sTest21[3], sRes21[3] );
	memset ( sRes21, 0, sizeof ( sRes21 ) );
	pTest21 = sTest22;
	int iCode22 = sphUTF8Decode ( pTest21 );
	ASSERT_EQ ( iCode22, 0xC5F0 );
	ASSERT_EQ ( sphUTF8Encode ( sRes21, iCode22 ), 3 );
	ASSERT_FALSE ( memcmp ( sTest22, sRes21, sizeof ( sTest22 ) ) );
	memset ( sRes21, 0, sizeof ( sRes21 ) );
	pRes21 = sRes21;
	SPH_UTF8_ENCODE ( pRes21, iCode22 );
	ASSERT_FALSE ( memcmp ( sTest22, sRes21, sizeof ( sTest22 ) ) );
	// the tokenizer keeps the 4-byte codepoint as part of the token
	pTokenizer = Tokenizer::Detail::CreateUTF8Tokenizer ();
	pTokenizer->SetBuffer ( ( BYTE * ) sTest21, sizeof ( sTest21 ) );
	ASSERT_STREQ ( ( const char * ) pTokenizer->GetToken (), "\xF4\x80\x80\x80\x32\x34" );
}
// Sentence boundary indexing: real sentence ends emit the magic SENTENCE
// token, while known abbreviations (Dr., Mr., Inc., ...) and single initials
// must not.
TEST_F ( TokenizerGtest, Sentence )
{
	const char * SENTENCE = "\2"; // MUST be in sync with sphinx.cpp
	// each case: input, expected tokens (with SENTENCE markers), NULL terminator
	const char * sTest[] =
	{
		"Bill Gates Jr. attended", "bill", "gates", "jr", "attended", NULL,
		"Very good, Dr. Watson", "very", "good", "dr", "watson", NULL,
		"VERY GOOD, DR. WATSON", "very", "good", "dr", "watson", NULL,
		"He left US. Went abroad", "he", "left", "us", SENTENCE, "went", "abroad", NULL,
		"Known as Mr. Doe", "known", "as", "mr", "doe", NULL,
		"Survived by Mrs. Doe", "survived", "by", "mrs", "doe", NULL,
		"J. R. R. Tolkien", "j", "r", "r", "tolkien", NULL,
		"That is it. A boundary", "that", "is", "it", SENTENCE, "a", "boundary", NULL,
		"Just a sentence. And then some.", "just", "a", "sentence", SENTENCE, "and", "then", "some", SENTENCE, NULL,
		"Right, guy number two? Yes, guy number one!", "right", "guy", "number", "two", SENTENCE, "yes", "guy", "number", "one", SENTENCE, NULL,
		"S.T.A.L.K.E.R. sold well in the U.K and elsewhere. Including Russia.", "s", "t", "a", "l", "k", "e", "r", "sold", "well", "in", "the", "u", "k", "and", "elsewhere", SENTENCE, "including", "russia", SENTENCE, NULL,
		"Yoyodine Inc. exists since 1800", "yoyodine", "inc", "exists", "since", "1800", NULL,
		"John D. Doe, our CEO", "john", "d", "doe", "our", "ceo", NULL,
		"Yoyodine Inc. (the Company)", "yoyodine", "inc", "the", "company", NULL,
		NULL
	};
	CSphTokenizerSettings tSettings;
	tSettings.m_iMinWordLen = 1;
	StrVec_t dWarnings;
	pTokenizer = Tokenizer::Create ( tSettings, nullptr, nullptr, dWarnings, sError );
	ASSERT_TRUE ( pTokenizer->SetCaseFolding ( "-, 0..9, A..Z->a..z, _, a..z, U+80..U+FF", sError ) );
	// ASSERT_TRUE ( pTok->SetBlendChars ( "., &", sError ) ); // NOLINT
	ASSERT_TRUE ( pTokenizer->EnableSentenceIndexing ( sError ) );
	int i = 0;
	while ( sTest[i] )
	{
		pTokenizer->SetBuffer ( ( BYTE * ) sTest[i], (int) strlen ( sTest[i] ) );
		i++;
		BYTE * sTok;
		while ( ( sTok = pTokenizer->GetToken () )!=NULL )
		{
			ASSERT_STREQ ( ( char * ) sTok, sTest[i] );
			i++;
		}
		ASSERT_FALSE ( sTest[i] );
		i++;
	}
}
//////////////////////////////////////////////////////////////////////////
// Fixture with '.' and '@' configured as blended chars and ()!-"@ as specials.
class TokenizerBlended : public TokenizerGtest
{
protected:
	void SetUp () override
	{
		TokenizerGtest::SetUp ();
		m_pTokenizer = CreateTestTokenizer ( 0 );
		ASSERT_TRUE ( m_pTokenizer->SetBlendChars ( "., @", sError ) );
		m_pTokenizer->AddSpecials ( "()!-\"@" );
	}
};
// Blended '.' inside parens: the blended token "texas." is reported first,
// then SkipBlended() drops its parts before the closing special is returned.
TEST_F ( TokenizerBlended, texas )
{
	char sQuery[] = "(texas.\\\")";
	pTokenizer->SetBuffer ( ( BYTE * ) sQuery, (int) strlen ( sQuery ) );
	auto fnNext = [this] { return ( const char * ) pTokenizer->GetToken (); };
	ASSERT_STREQ ( fnNext (), "(" );
	ASSERT_STREQ ( fnNext (), "texas." );
	ASSERT_TRUE ( pTokenizer->TokenIsBlended () );
	pTokenizer->SkipBlended ();
	ASSERT_STREQ ( fnNext (), ")" );
	ASSERT_FALSE ( pTokenizer->GetToken () );
}
// Escaped special inside quotes: the escaped '-' stays attached to "2003".
TEST_F ( TokenizerBlended, series2003 )
{
	char sTest2[] = "\"series 2003\\-\\\"\"";
	pTokenizer->SetBuffer ( ( BYTE * ) sTest2, (int) strlen ( sTest2 ) );
	ASSERT_STREQ ( ( const char * ) pTokenizer->GetToken (), "\"" );
	ASSERT_STREQ ( ( const char * ) pTokenizer->GetToken (), "series" );
	ASSERT_STREQ ( ( const char * ) pTokenizer->GetToken (), "2003-" );
	ASSERT_STREQ ( ( const char * ) pTokenizer->GetToken (), "\"" );
	ASSERT_FALSE ( pTokenizer->GetToken () );
}
// A blended token ("lock.up") is followed by its parts ("lock", "up"), with
// TokenIsBlended()/TokenIsBlendedPart() flagged accordingly.
TEST_F ( TokenizerBlended, aa_lock_up_bb )
{
	char sTest3[] = "aa lock.up bb";
	pTokenizer->SetBuffer ( ( BYTE * ) sTest3, (int) strlen ( sTest3 ) );
	ASSERT_STREQ ( ( const char * ) pTokenizer->GetToken (), "aa" );
	ASSERT_FALSE ( pTokenizer->TokenIsBlended () );
	ASSERT_FALSE ( pTokenizer->TokenIsBlendedPart () );
	ASSERT_STREQ ( ( const char * ) pTokenizer->GetToken (), "lock.up" );
	ASSERT_TRUE ( pTokenizer->TokenIsBlended () );
	ASSERT_STREQ ( ( const char * ) pTokenizer->GetToken (), "lock" );
	ASSERT_FALSE ( pTokenizer->TokenIsBlended () );
	ASSERT_TRUE ( pTokenizer->TokenIsBlendedPart () );
	ASSERT_STREQ ( ( const char * ) pTokenizer->GetToken (), "up" );
	ASSERT_FALSE ( pTokenizer->TokenIsBlended () );
	ASSERT_TRUE ( pTokenizer->TokenIsBlendedPart () );
	ASSERT_STREQ ( ( const char * ) pTokenizer->GetToken (), "bb" );
	ASSERT_FALSE ( pTokenizer->TokenIsBlended () );
	ASSERT_FALSE ( pTokenizer->TokenIsBlendedPart () );
}
// SkipBlended() returns the number of parts skipped; "3" is overshort
// (min_word_len==2), so only "rd" counts.
TEST_F ( TokenizerBlended, text_3rd )
{
	char sTest4[] = "3.rd text";
	pTokenizer->SetBuffer ( ( BYTE * ) sTest4, (int) strlen ( sTest4 ) );
	ASSERT_STREQ ( ( const char * ) pTokenizer->GetToken (), "3.rd" );
	ASSERT_TRUE ( pTokenizer->TokenIsBlended () );
	ASSERT_TRUE ( pTokenizer->SkipBlended ()==1 );
	ASSERT_STREQ ( ( const char * ) pTokenizer->GetToken (), "text" );
	ASSERT_FALSE ( pTokenizer->TokenIsBlended () );
	ASSERT_FALSE ( pTokenizer->GetToken () );
}
// Both parts of "123@rd" pass the length check, so SkipBlended() skips two.
TEST_F ( TokenizerBlended, text_123rd )
{
	char sTest5[] = "123\\@rd text";
	pTokenizer->SetBuffer ( ( BYTE * ) sTest5, (int) strlen ( sTest5 ) );
	ASSERT_STREQ ( ( const char * ) pTokenizer->GetToken (), "123@rd" );
	ASSERT_TRUE ( pTokenizer->TokenIsBlended () );
	ASSERT_TRUE ( pTokenizer->SkipBlended ()==2 );
	ASSERT_STREQ ( ( const char * ) pTokenizer->GetToken (), "text" );
	ASSERT_FALSE ( pTokenizer->TokenIsBlended () );
	ASSERT_FALSE ( pTokenizer->GetToken () );
}
// A long blended run with escaped dots: 5 parts survive the length check
// ("at","ta","da","bl","ok" - single-char "c" is overshort).
TEST_F ( TokenizerBlended, at_ta_c_da_bl_ok_yo_pest )
{
	char sTest6[] = "at.ta\\.c.da\\.bl.ok yo pest";
	pTokenizer->SetBuffer ( ( BYTE * ) sTest6, (int) strlen ( sTest6 ) );
	ASSERT_STREQ ( ( const char * ) pTokenizer->GetToken (), "at.ta.c.da.bl.ok" );
	ASSERT_TRUE ( pTokenizer->TokenIsBlended () );
	ASSERT_TRUE ( pTokenizer->SkipBlended ()==5 );
	ASSERT_STREQ ( ( const char * ) pTokenizer->GetToken (), "yo" );
	ASSERT_STREQ ( ( const char * ) pTokenizer->GetToken (), "pest" );
	ASSERT_FALSE ( pTokenizer->GetToken () );
}
// Same as text_123rd but with an overshort first part.
TEST_F ( TokenizerBlended, text_3_at_rd )
{
	char sTest7[] = "3\\@rd text";
	pTokenizer->SetBuffer ( ( BYTE * ) sTest7, (int) strlen ( sTest7 ) );
	ASSERT_STREQ ( ( const char * ) pTokenizer->GetToken (), "3@rd" );
	ASSERT_TRUE ( pTokenizer->TokenIsBlended () );
	ASSERT_TRUE ( pTokenizer->SkipBlended ()==1 ); // because 3 is overshort!
	ASSERT_STREQ ( ( const char * ) pTokenizer->GetToken (), "text" );
	ASSERT_FALSE ( pTokenizer->TokenIsBlended () );
	ASSERT_FALSE ( pTokenizer->GetToken () );
}
//////////////////////////////////////////////////////////////////////////
// An embedded NUL terminates scanning regardless of the size limit.
TEST ( UTF8LEN, Test1 )
{
	const char * szData = "ab\0cd";
	ASSERT_EQ ( 2, sphUTF8Len ( szData, 256 ) );
}
// Both an empty string and a null pointer yield zero length.
TEST ( UTF8LEN, Test2 )
{
	ASSERT_EQ ( 0, sphUTF8Len ( "", 256 ) );
	ASSERT_EQ ( 0, sphUTF8Len ( nullptr, 256 ) );
}
//////////////////////////////////////////////////////////////////////////
// Value-parameterized fixture: run level 1 uses a plain tokenizer, levels
// 2 and 3 also load the exceptions file. The level additionally gates which
// table rows execute in OneLineTests.
class TokenizerP : public TokenizerGtest, public ::testing::WithParamInterface<DWORD>
{
protected:
	int iRun = 0; // current run level, taken from the test parameter
	void SetUp () override
	{
		TokenizerGtest::SetUp ();
		iRun = GetParam();
		m_pTokenizer = CreateTestTokenizer ( ( iRun>=2 ) ? TOK_EXCEPTIONS : 0 );
	}
};
// Table-driven tokenization cases. Each row begins with its minimum run
// level ("1".."3"); the loop stops at the first row whose level exceeds
// iRun, so rows must stay sorted by level.
TEST_P( TokenizerP, OneLineTests )
{
	const char * dTests[] =
	{
		"1", "", NULL, // test that empty strings work
		"1", "this is my rifle", "this", "is", "my", "rifle", NULL, // test that tokenizing works
		"1", "This is MY rifle", "this", "is", "my", "rifle", NULL, // test that folding works
		"1", "i-phone", "i-phone", NULL, // test that duals (specials in the middle of the word) work ok
		"1", "i phone", "phone", NULL, // test that short words are skipped
		"1", "this is m", "this", "is", NULL, // test that short words at the end are skipped
		"1", "the -phone", "the", "-", "phone", NULL, // test that specials work
		"1", "the!phone", "the", "!", "phone", NULL, // test that specials work
		"1", "i!phone", "!", "phone", NULL, // test that short words preceding specials are skipped
		"1", "/-hi", "-", "hi", NULL, // test that synonym-dual but folded-special chars work ok
		"2", "AT&T", "AT&T", NULL, // test that synonyms work
		// NOTE(review): the two "AT & T" rows below look identical; whitespace
		// runs may have been collapsed in transit - verify against upstream
		"2", "AT & T", "AT & T", NULL, // test that synonyms with spaces work
		"2", "AT & T", "AT & T", NULL, // test that synonyms with continuous spaces work
		"2", "-AT&T", "-", "AT&T", NULL, // test that synonyms with specials work
		"2", "AT&", "at", NULL, // test that synonyms prefixes are not lost on eof
		"2", "AT&tee.yo", "at", "tee", "yo", NULL, // test that non-synonyms with partially matching prefixes work
		"2", "standarten fuehrer", "Standartenfuehrer", NULL,
		"2", "standarten fuhrer", "Standartenfuehrer", NULL,
		"2", "standarten fuehrerr", "standarten", "fuehrerr", NULL,
		"2", "standarten fuehrer Stirlitz", "Standartenfuehrer", "stirlitz", NULL,
		"2", "standarten fuehrer Zog", "Standartenfuehrer", "zog", NULL,
		"2", "stand\\arten fue\\hrer Zog", "Standartenfuehrer", "zog", NULL,
		"2", "OS/2 vs OS/360 vs Ms-Dos", "OS/2", "vs", "os", "360", "vs", "MS-DOS", NULL,
		"2", "AT ", "at", NULL, // test that prefix-whitespace-eof combo does not hang
		"2", "AT&T&TT", "AT&T", "tt", NULL,
		"2", "http://OS/2", "http", "OS/2", NULL,
		"2", "AT*&*T", "at", NULL,
		"2", "# OS/2's system install", "OS/2", "system", "install", NULL,
		"2", "IBM-s/OS/2/Merlin", "ibm-s", "OS/2", "merlin", NULL,
		"2", "U.S.A", "US", NULL,
		"2", "AT&T!", "AT&T", "!", NULL, // exceptions vs specials
		"2", "AT&T!!!", "AT&T", "!", "!", "!", NULL, // exceptions vs specials
		"2", "U.S.A.!", "USA", "!", NULL, // exceptions vs specials
		"2", "MS DOSS feat.Deskview.MS DOS", "ms", "doss", "featuring", "deskview", "MS-DOS", NULL,
		"2", g_sMagic, "test", NULL,
		"2", "U.S. U.S.A. U.S.A.F.", "US", "USA", "USAF", NULL,
		"2", "U.S.AB U.S.A. U.S.B.U.S.D.U.S.U.S.A.F.", "US", "ab", "USA", "USB", "USD", "US", "USAF", NULL,
		"3", "phon\\e", "phone", NULL,
		"3", "\\thephone", "thephone", NULL,
		"3", "the\\!phone", "the", "phone", NULL,
		"3", "\\!phone", "phone", NULL,
		"3", "\\\\phone", "phone", NULL, // the correct behavior if '\' is not in charset
		"3", "pho\\\\ne", "pho", "ne", NULL,
		"3", "phon\\\\e", "phon", NULL,
		"3", "trailing\\", "trailing", NULL,
		NULL
	};
	// iCur walks: level prefix, input string, expected tokens, NULL terminator
	for ( int iCur = 0; dTests[iCur] && ( dTests[iCur++][0]-'0' )<=iRun; )
	{
		m_pTokenizer->SetBuffer ( ( BYTE * ) dTests[iCur], (int) strlen ( dTests[iCur] ) );
		iCur++;
		for ( BYTE * pToken = m_pTokenizer->GetToken (); pToken; pToken = m_pTokenizer->GetToken () )
		{
			ASSERT_TRUE ( dTests[iCur] );
			ASSERT_STREQ ( ( const char * ) pToken, dTests[iCur] );
			iCur++;
		}
		ASSERT_FALSE ( dTests[iCur] );
		iCur++;
	}
}
// A 2-byte UTF-8 run (U+80..U+82) stays one token under the U+80..U+FF folding.
TEST_P( TokenizerP, MiscOneLineTests )
{
	// test misc one-liners
	const char * dTests2[] = { "\xC2\x80\xC2\x81\xC2\x82", "\xC2\x80\xC2\x81\xC2\x82", NULL, NULL };
	for ( int iCur = 0; dTests2[iCur]; )
	{
		m_pTokenizer->SetBuffer ( ( BYTE * ) dTests2[iCur], (int) strlen ( dTests2[iCur] ) );
		iCur++;
		for ( BYTE * pToken = m_pTokenizer->GetToken (); pToken; pToken = m_pTokenizer->GetToken () )
		{
			ASSERT_TRUE ( dTests2[iCur] );
			ASSERT_STREQ ( ( const char * ) pToken, dTests2[iCur] );
			iCur++;
		}
		ASSERT_FALSE ( dTests2[iCur] );
		iCur++;
	}
}
// Broken UTF-8 (0xD0 lead byte followed by invalid 0xFF) must terminate the
// current token cleanly instead of corrupting it.
TEST_P( TokenizerP, utf8error )
{
	const char * sLine3 = "hi\xd0\xffh";
	m_pTokenizer->SetBuffer ( ( BYTE * ) sLine3, 4 );
	ASSERT_STREQ ( ( char * ) m_pTokenizer->GetToken (), "hi" );
}
// A 4096-char word is truncated to SPH_MAX_WORD_LEN and the rest is dropped.
TEST_P( TokenizerP, uberlong )
{
	const int UBERLONG = 4096;
	char * sLine4 = new char[UBERLONG + 1];
	memset ( sLine4, 'a', UBERLONG );
	sLine4[UBERLONG] = '\0';
	// expected token: 'a' repeated SPH_MAX_WORD_LEN times
	char sTok4[SPH_MAX_WORD_LEN + 1];
	memset ( sTok4, 'a', SPH_MAX_WORD_LEN );
	sTok4[SPH_MAX_WORD_LEN] = '\0';
	m_pTokenizer->SetBuffer ( ( BYTE * ) sLine4, (int) strlen ( sLine4 ) );
	ASSERT_STREQ ( ( char * ) m_pTokenizer->GetToken (), sTok4 );
	ASSERT_FALSE ( m_pTokenizer->GetToken () );
	SafeDeleteArray ( sLine4 );
}
// Uberlong runs of the synonym-only char '/': yields no tokens at all, while
// "aa/" triplets tokenize into a stream of "aa". Only meaningful when the
// exceptions file is loaded, hence the iRun==2 guard (no-op on other runs).
TEST_P( TokenizerP, uberlong_synonim_only )
{
	const int UBERLONG = 4096;
	if ( iRun==2 )
	{
		char * sLine4 = new char[UBERLONG + 1];
		memset ( sLine4, '/', UBERLONG );
		sLine4[UBERLONG] = '\0';
		m_pTokenizer->SetBuffer ( ( BYTE * ) sLine4, (int) strlen ( sLine4 ) );
		ASSERT_FALSE( m_pTokenizer->GetToken () );
		// rewrite the buffer into "aa/aa/.../aa" (terminated early by the '\0')
		for ( int i = 0; i<UBERLONG - 3; i += 3 )
		{
			sLine4[i + 0] = 'a';
			sLine4[i + 1] = 'a';
			sLine4[i + 2] = '/';
			sLine4[i + 3] = '\0';
		}
		m_pTokenizer->SetBuffer ( ( BYTE * ) sLine4, (int) strlen ( sLine4 ) );
		for ( int i = 0; i<UBERLONG - 3; i += 3 )
			ASSERT_STREQ ( ( char * ) m_pTokenizer->GetToken (), "aa" );
		ASSERT_FALSE ( m_pTokenizer->GetToken () );
		SafeDeleteArray ( sLine4 );
	}
}
// With min_word_len raised to 5, short words are dropped unless they carry
// a wildcard star ('*' added as a plain char).
TEST_P ( TokenizerP, short_token_handling )
{
	const char * dTestsShort[] =
	{
		"ab*", "ab*", NULL,
		"*ab", "*ab", NULL,
		"abcdef", "abcdef", NULL,
		"ab *ab* abc", "*ab*", NULL,
		NULL
	};
	TokenizerRefPtr_c pShortTokenizer = m_pTokenizer->Clone ( SPH_CLONE_QUERY );
	pShortTokenizer->AddPlainChars ( "*" );
	CSphTokenizerSettings tSettings = pShortTokenizer->GetSettings ();
	tSettings.m_iMinWordLen = 5;
	pShortTokenizer->Setup ( tSettings );
	for ( int iCur = 0; dTestsShort[iCur]; )
	{
		pShortTokenizer->SetBuffer ( ( BYTE * ) ( dTestsShort[iCur] ), (int) strlen ( ( const char * ) dTestsShort[iCur] ) );
		iCur++;
		for ( BYTE * pToken = pShortTokenizer->GetToken (); pToken; pToken = pShortTokenizer->GetToken () )
		{
			ASSERT_TRUE ( dTestsShort[iCur] );
			ASSERT_STREQ ( ( const char * ) pToken, dTestsShort[iCur] );
			iCur++;
		}
		ASSERT_FALSE ( dTestsShort[iCur] );
		iCur++;
	}
}
// GetBoundary() flags the token right AFTER a boundary char ('?') was crossed.
TEST_P( TokenizerP, boundaries )
{
	CSphString sError; // NOTE: intentionally shadows the fixture member
	ASSERT_TRUE ( m_pTokenizer->SetBoundary ( "?", sError ) );
	char sLine5[] = "hello world? testing boundaries?";
	m_pTokenizer->SetBuffer ( ( BYTE * ) sLine5, (int) strlen ( sLine5 ) );
	ASSERT_STREQ ( ( const char * ) m_pTokenizer->GetToken (), "hello" );
	ASSERT_FALSE ( m_pTokenizer->GetBoundary () );
	ASSERT_STREQ ( ( const char * ) m_pTokenizer->GetToken (), "world" );
	ASSERT_FALSE ( m_pTokenizer->GetBoundary () );
	ASSERT_STREQ ( ( const char * ) m_pTokenizer->GetToken (), "testing" );
	ASSERT_TRUE ( m_pTokenizer->GetBoundary () );
	ASSERT_STREQ ( ( const char * ) m_pTokenizer->GetToken (), "boundaries" );
	ASSERT_FALSE( m_pTokenizer->GetBoundary () );
}
// GetTokenStart()/GetTokenEnd() point into the source buffer: start is the
// token's first char, end is one past its last char.
TEST_P( TokenizerP, specials_vs_tokens_start_end_ptrs )
{
	char sLine6[] = "abc!def";
	m_pTokenizer->SetBuffer ( ( BYTE * ) sLine6, (int) strlen ( sLine6 ) );
	ASSERT_STREQ ( ( const char * ) m_pTokenizer->GetToken (), "abc" );
	ASSERT_EQ( *m_pTokenizer->GetTokenStart (), 'a' );
	ASSERT_EQ ( *m_pTokenizer->GetTokenEnd (), '!' );
	ASSERT_STREQ( ( const char * ) m_pTokenizer->GetToken (), "!" );
	ASSERT_EQ ( *m_pTokenizer->GetTokenStart (), '!' );
	ASSERT_EQ ( *m_pTokenizer->GetTokenEnd (), 'd' );
	ASSERT_STREQ( ( const char * ) m_pTokenizer->GetToken (), "def" );
	ASSERT_EQ ( *m_pTokenizer->GetTokenStart (), 'd' );
	ASSERT_EQ ( *m_pTokenizer->GetTokenEnd (), '\0' );
}
// Embedded NUL bytes act as separators within the given buffer length (9),
// and the tokenizer keeps returning null once the buffer is exhausted.
TEST_P( TokenizerP, embedded_zeroes )
{
	char sLine7[] = "abc\0\0\0defgh";
	m_pTokenizer->SetBuffer ( ( BYTE * ) sLine7, 9 ); // only "abc\0\0\0def" is visible
	ASSERT_STREQ ( ( const char * ) m_pTokenizer->GetToken (), "abc" );
	ASSERT_STREQ ( ( const char * ) m_pTokenizer->GetToken (), "def" );
	ASSERT_FALSE( m_pTokenizer->GetToken () );
	ASSERT_FALSE( m_pTokenizer->GetToken () );
	ASSERT_FALSE( m_pTokenizer->GetToken () );
}
// Run every TokenizerP test at levels 1..3 (levels 2 and 3 load exceptions).
INSTANTIATE_TEST_SUITE_P ( Run3Times, TokenizerP, ::testing::Values ( 1, 2, 3 ) );
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
// Sorts keywords by their atom (query) position; used by CheckQuerySoftSpace()
// to order terms collected from a parsed query tree.
struct CmpAtomPos_fn
{
	bool IsLess ( const XQKeyword_t * pA, const XQKeyword_t * pB ) const
	{
		return pA->m_iAtomPos<pB->m_iAtomPos;
	}
};
// Collect every keyword from the query tree rooted at pNode and verify the
// sorted atom positions match pQPos (iCount entries), with "me" coming first
// and "off" last.
static void CheckQuerySoftSpace ( const XQNode_t * pNode, const int * pQPos, int iCount )
{
	ASSERT_TRUE ( pNode );
	CSphVector<const XQKeyword_t *> dTerms;
	CSphVector<const XQNode_t *> dChildren;
	dChildren.Add ( pNode );
	// worklist traversal: dChildren grows while we iterate over it by index,
	// so the whole subtree gets visited
	ARRAY_FOREACH ( i, dChildren )
	{
		const XQNode_t * pChild = dChildren[i];
		for ( auto * pChildren : pChild->m_dChildren )
			dChildren.Add ( pChildren );
		for ( auto & dWord : pChild->m_dWords )
			dTerms.Add ( &dWord );
	}
	dTerms.Sort ( CmpAtomPos_fn () );
	ASSERT_EQ ( iCount, dTerms.GetLength () );
	ARRAY_FOREACH ( i, dTerms )
	{
		ASSERT_EQ ( dTerms[i]->m_iAtomPos, pQPos[i] );
	}
	ASSERT_STREQ ( dTerms[0]->m_sWord.cstr (), "me" );
	ASSERT_STREQ ( dTerms.Last ()->m_sWord.cstr (), "off" );
}
/// Fixture for extended-query parsing tests: builds a query tokenizer (UTF-8,
/// min word length 2, synonyms loaded from g_sTmpfile — presumably prepared
/// by the TokenizerGtest base, TODO confirm) and a CRC dictionary over a
/// two-field ( title, body ) schema.
class QueryParser : public TokenizerGtest
{
protected:
	void TearDown () override
	{
		TokenizerGtest::TearDown ();
	}
	void SetUp () override
	{
		tSchema.AddField ( "title" );
		tSchema.AddField ( "body" );
		// base tokenizer: plain UTF-8 with synonyms
		TokenizerRefPtr_c pBase = Tokenizer::Detail::CreateUTF8Tokenizer ();
		CSphTokenizerSettings tTokenizerSetup;
		tTokenizerSetup.m_iMinWordLen = 2;
		tTokenizerSetup.m_sSynonymsFile = g_sTmpfile;
		pBase->Setup ( tTokenizerSetup );
		StrVec_t dWarnings;
		ASSERT_TRUE ( pBase->LoadSynonyms ( g_sTmpfile, NULL, dWarnings, sError ) );
		ASSERT_TRUE ( sError.IsEmpty() );
		// the tests need a query-mode clone of the base tokenizer
		pTokenizer = sphCloneAndSetupQueryTokenizer ( pBase, true, false, false );
		CSphDictSettings tDictSettings;
		tDictSettings.m_bWordDict = false;
		pDict = sphCreateDictionaryCRC ( tDictSettings, NULL, pTokenizer, "query", false, 32, nullptr, sError );
		ASSERT_TRUE ( pTokenizer );
		ASSERT_TRUE ( pDict );
	}
	DictRefPtr_c pDict;
	CSphSchema tSchema;
	CSphIndexSettings tTmpSettings;	// default index settings, shared by the tests
};
// Parse a battery of extended-syntax queries and compare the reconstructed
// tree against its expected canonical form.
TEST_F ( QueryParser, test_many )
{
	// query text and the expected result of sphReconstructNode() on its tree
	struct QueryTest_t
	{
		const char * m_sQuery;
		const char * m_sReconst;
	};
	const QueryTest_t dTests[] =
	{
		{ "aaa bbb ccc" , "( aaa bbb ccc )" }
		, { "aaa|bbb ccc" , "( ( aaa | bbb ) ccc )" }
		, { "aaa bbb|ccc" , "( aaa ( bbb | ccc ) )" }
		, { "aaa (bbb ccc)|ddd" , "( aaa ( ( bbb ccc ) | ddd ) )" }
		, { "aaa bbb|(ccc ddd)" , "( aaa ( bbb | ( ccc ddd ) ) )" }
		, { "aaa bbb|(ccc ddd)|eee|(fff)" , "( aaa ( bbb | ( ccc ddd ) | eee | fff ) )" }
		, { "aaa bbb|(ccc ddd) eee|(fff)" , "( aaa ( bbb | ( ccc ddd ) ) ( eee | fff ) )" }
		, { "aaa (ccc ddd)|bbb|eee|(fff)" , "( aaa ( ( ccc ddd ) | bbb | eee | fff ) )" }
		, { "aaa (ccc ddd)|bbb eee|(fff)" , "( aaa ( ( ccc ddd ) | bbb ) ( eee | fff ) )" }
		, { "aaa \"bbb ccc\"~5|ddd" , "( aaa ( \"bbb ccc\"~5 | ddd ) )" }
		, { "aaa bbb|\"ccc ddd\"~5" , "( aaa ( bbb | \"ccc ddd\"~5 ) )" }
		, { "aaa ( ( \"bbb ccc\"~3|ddd ) eee | ( fff -ggg ) )", "( aaa ( ( \"bbb ccc\"~3 | ddd ) ( eee | ( fff AND NOT ggg ) ) ) )" }
		, { "@title aaa @body ccc|(@title ddd eee)|fff ggg" , "( ( @title: aaa ) ( ( @body: ccc ) | ( ( @title: ddd ) ( @title: eee ) ) | ( @body: fff ) ) ( @body: ggg ) )" }
		, { "@title hello world | @body sample program" , "( ( @title: hello ) ( ( @title: world ) | ( @body: sample ) ) ( @body: program ) )" }
		, { "@title one two three four" , "( ( @title: one ) ( @title: two ) ( @title: three ) ( @title: four ) )" }
		, { "@title one (@body two three) four" , "( ( @title: one ) ( ( @body: two ) ( @body: three ) ) ( @title: four ) )" }
		// "7" and single-letter words are dropped by the min-word-len=2 tokenizer
		, { "windows 7 2000" , "( windows 2000 )" }
		, { "aaa a|bbb" , "( aaa bbb )" }
		, { "aaa bbb|x y z|ccc" , "( aaa bbb ccc )" }
		, { "a" , "" }
		, { "hello -world" , "( hello AND NOT world )" }
		, { "-hello world" , "( world AND NOT hello )" }
		, { "\"phrase (query)/3 ~on steroids\"" , "\"phrase query on steroids\"" }
		, { "hello a world" , "( hello world )" }
		// pure negations reconstruct as empty by default (see test_NOT below)
		, { "-one" , "" }
		, { "-one -two" , "" }
		, { "\"\"" , "" }
		, { "\"()\"" , "" }
		, { "\"]\"" , "" }
		, { "@title hello @body -world" , "( ( @title: hello ) AND NOT ( @body: world ) )" }
		// synonym mapping from the loaded synonyms file
		, { "Ms-Dos" , "MS-DOS" }
	};
	for ( auto & dTest : dTests )
	{
		XQQuery_t tQuery;
		sphParseExtendedQuery ( tQuery, dTest.m_sQuery, nullptr, pTokenizer, &tSchema, pDict, tTmpSettings, nullptr );
		CSphString sReconst = sphReconstructNode ( tQuery.m_pRoot, &tSchema );
		ASSERT_STREQ ( sReconst.cstr(), dTest.m_sReconst );
	}
}
TEST_F ( QueryParser, NEAR_with_NOT )
{
	// a NOT inside a NEAR operand is not a valid query: parsing must fail
	// and leave no tree behind
	XQQuery_t tParsed;
	bool bParsed = sphParseExtendedQuery ( tParsed, "me -test NEAR/2 off", nullptr, pTokenizer, &tSchema, pDict, tTmpSettings, nullptr );
	ASSERT_FALSE ( bParsed );
	ASSERT_FALSE ( tParsed.m_pRoot );
}
TEST_F ( QueryParser, soft_whitespace1 )
{
	// a stray '[' between terms acts as soft whitespace: it separates the
	// tokens but does not shift their query positions
	XQQuery_t tParsed;
	ASSERT_TRUE ( sphParseExtendedQuery ( tParsed, "me [ off", nullptr, pTokenizer, &tSchema, pDict, tTmpSettings, nullptr ) );
	const int dExpectedQpos[] = { 1, 2 };
	SCOPED_TRACE ( "soft_whitespace1" );
	CheckQuerySoftSpace ( tParsed.m_pRoot, dExpectedQpos, sizeof ( dExpectedQpos ) / sizeof ( dExpectedQpos[0] ) );
}
TEST_F ( QueryParser, soft_whitespace2 )
{
	// a whole run of soft separators ('[', commas, '&&') between the terms
	// still costs no query positions
	XQQuery_t tParsed;
	ASSERT_TRUE ( sphParseExtendedQuery ( tParsed, "me [ ,, &&,[ off", nullptr, pTokenizer, &tSchema, pDict, tTmpSettings, nullptr ) );
	const int dExpectedQpos[] = { 1, 2 };
	SCOPED_TRACE ( "soft_whitespace2" );
	CheckQuerySoftSpace ( tParsed.m_pRoot, dExpectedQpos, sizeof ( dExpectedQpos ) / sizeof ( dExpectedQpos[0] ) );
}
TEST_F ( QueryParser, soft_whitespace3 )
{
	// UTF-8 CJK text between the terms ("私はガ"): "off" lands at position 3,
	// i.e. the intervening text costs exactly one position here
	XQQuery_t tParsed;
	ASSERT_TRUE ( sphParseExtendedQuery ( tParsed, "me \xE7\xA7\x81\xE3\x81\xAF\xE3\x82\xAC off", nullptr, pTokenizer, &tSchema, pDict, tTmpSettings, nullptr ) );
	const int dExpectedQpos[] = { 1, 3 };
	SCOPED_TRACE ( "soft_whitespace3" );
	CheckQuerySoftSpace ( tParsed.m_pRoot, dExpectedQpos, sizeof ( dExpectedQpos ) / sizeof ( dExpectedQpos[0] ) );
}
TEST_F ( QueryParser, soft_whitespace4 )
{
	// three repetitions of the same CJK text between the terms: the expected
	// positions stay { 1, 3 }, the same as with a single repetition
	XQQuery_t tParsed;
	ASSERT_TRUE (
		sphParseExtendedQuery ( tParsed, "me \xE7\xA7\x81\xE3\x81\xAF\xE3\x82\xAC \xE7\xA7\x81\xE3\x81\xAF\xE3\x82\xAC \xE7\xA7\x81\xE3\x81\xAF\xE3\x82\xAC off", nullptr, pTokenizer, &tSchema, pDict, tTmpSettings, nullptr ) );
	const int dExpectedQpos[] = { 1, 3 };
	SCOPED_TRACE ( "soft_whitespace4" );
	CheckQuerySoftSpace ( tParsed.m_pRoot, dExpectedQpos, sizeof ( dExpectedQpos ) / sizeof ( dExpectedQpos[0] ) );
}
/// Minimal index stub for the query-transform tests: FillKeywords() reports
/// fake per-keyword document counts taken from m_hHits instead of a real index.
class CSphDummyIndex : public CSphIndexStub
{
public:
	SmallStringHash_T<int> m_hHits;	// keyword text -> pseudo document count
	CSphDummyIndex() : CSphIndexStub ( nullptr, nullptr ) {}
	bool FillKeywords ( CSphVector <CSphKeywordInfo> & dKeywords ) const override;
};
/// Fake the per-keyword document counts from the m_hHits lookup table;
/// keywords missing from the table get zero docs.
bool CSphDummyIndex::FillKeywords ( CSphVector <CSphKeywordInfo> & dKeywords ) const
{
	for ( auto & tKeyword : dKeywords )
	{
		const int * pDocs = m_hHits ( tKeyword.m_sTokenized );
		tKeyword.m_iDocs = pDocs ? *pDocs : 0;
	}
	return true;
}
// Check the boolean-simplification pass (sphTransformExtendedQuery): each case
// gives the raw query, its reconstruction before the transform, its expected
// reconstruction after the transform, and (optionally) fake per-keyword doc
// counts that drive the frequency-based rewrites.
TEST_F ( QueryParser, query_transforms )
{
	// keyword text -> pseudo document frequency for CSphDummyIndex
	struct CKeywordHits {
		const char * m_sKeyword;
		int m_iHits;
	};
	struct QueryTest_t
	{
		const char * m_sQuery;				// raw query text
		const char * m_sReconst;			// reconstruction before transform
		const char * m_sReconstTransformed;	// reconstruction after transform
		const CKeywordHits * m_pKeywordHits;	// optional frequency table (0-terminated)
	};
	// frequency tables: [0] "nnn" is frequent vs rare siblings; [1] siblings
	// are frequent too (so the rewrite must NOT fire); [2] adds "qqq"/"www"
	const CKeywordHits dPseudoHits [][10] =
	{
		{ { "nnn", 10 }, { "aaa", 1 }, { "bbb", 1 }, { 0, 0 } },
		{ { "nnn", 10 }, { "aaa", 100 }, { "bbb", 200 }, { 0, 0 } },
		{ { "nnn", 10 }, { "aaa", 1 }, { "bbb", 2 }, { "qqq", 500 }, { "www", 100 }, { 0, 0 } }
	};
	const QueryTest_t dTest[] =
	{
		// COMMON NOT
		{
			"( aaa !ccc ) | ( bbb !ccc )",
			"( ( aaa AND NOT ccc ) | ( bbb AND NOT ccc ) )",
			"( ( aaa | bbb ) AND NOT ccc )",
			NULL
		},
		{
			"( aaa bbb !ccc) | ( ddd eee !ccc ) ",
			"( ( ( aaa bbb ) AND NOT ccc ) | ( ( ddd eee ) AND NOT ccc ) )",
			"( ( ( aaa bbb ) | ( ddd eee ) ) AND NOT ccc )",
			NULL
		},
		{
			"( aaa bbb !ccc) | ( ddd eee !ccc ) | fff | ( ggg !jjj )",
			"( ( ( aaa bbb ) AND NOT ccc ) | ( ( ddd eee ) AND NOT ccc ) | fff | ( ggg AND NOT jjj ) )",
			"( ( ( ( aaa bbb ) | ( ddd eee ) ) AND NOT ccc ) | fff | ( ggg AND NOT jjj ) )",
			NULL
		},
		{
			"(aaa !bbb) | (ccc !bbb) | (ccc !eee) | (ddd !eee)",
			"( ( aaa AND NOT bbb ) | ( ccc AND NOT bbb ) | ( ccc AND NOT eee ) | ( ddd AND NOT eee ) )",
			"( ( ( aaa | ccc ) AND NOT bbb ) | ( ( ccc | ddd ) AND NOT eee ) )",
			NULL
		},
		{
			"((( aaa & bbb & ccc ) !eee) | ((kkk | jjj & kkk & (zzz | jjj)) !eee))",
			"( ( ( aaa bbb ccc ) AND NOT eee ) | ( ( ( kkk | jjj ) kkk ( zzz | jjj ) ) AND NOT eee ) )",
			"( ( ( aaa bbb ccc ) | ( ( kkk | jjj ) kkk ( zzz | jjj ) ) ) AND NOT eee )",
			NULL
		},
		{
			"(aaa !(aaa !nnn)) | (bbb !(aaa !nnn))",
			"( ( aaa AND NOT ( aaa AND NOT nnn ) ) | ( bbb AND NOT ( aaa AND NOT nnn ) ) )",
			"( ( aaa | bbb ) AND NOT ( aaa AND NOT nnn ) )",
			NULL
		},
		// COMMON NOT WITH MIXED PHRASES/PROXIMITY terms
		{
			"(aaa !(\"zzz yyy\")) | (bbb !(\"zzz yyy\"~30)) | (ccc !(\"zzz yyy\"~20))",
			"( ( aaa AND NOT \"zzz yyy\" ) | ( bbb AND NOT \"zzz yyy\"~30 ) | ( ccc AND NOT \"zzz yyy\"~20 ) )",
			"( ( aaa | bbb | ccc ) AND NOT \"zzz yyy\"~30 )",
			NULL
		},
		// COMMON COMPOUND NOT
		{
			"(aaa !(nnn ccc)) | (bbb !(nnn ddd))",
			"( ( aaa AND NOT ( nnn ccc ) ) | ( bbb AND NOT ( nnn ddd ) ) )",
			"( ( aaa AND NOT ccc ) | ( bbb AND NOT ddd ) | ( ( aaa | bbb ) AND NOT nnn ) )",
			( const CKeywordHits * ) &dPseudoHits[0]
		},
		{
			"(aaa !(ccc nnn)) | (bbb !(nnn ddd)) | (ccc !nnn)",
			"( ( aaa AND NOT ( ccc nnn ) ) | ( bbb AND NOT ( nnn ddd ) ) | ( ccc AND NOT nnn ) )",
			"( ( aaa AND NOT ccc ) | ( bbb AND NOT ddd ) | ( ( ccc | aaa | bbb ) AND NOT nnn ) )",
			( const CKeywordHits * ) &dPseudoHits[0]
		},
		{
			// same query shape, but here the siblings are frequent too, so
			// the compound-NOT rewrite must not fire
			"(aaa !(ccc nnn)) | (bbb !(nnn ddd))",
			"( ( aaa AND NOT ( ccc nnn ) ) | ( bbb AND NOT ( nnn ddd ) ) )",
			"( ( aaa AND NOT ( ccc nnn ) ) | ( bbb AND NOT ( nnn ddd ) ) )",
			( const CKeywordHits * ) &dPseudoHits[1]
		},
		// COMMON COMPOUND NOT WITH MIXED PHRASES/PROXIMITY terms
		{
			"(aaa !(ccc \"nnn zzz\"~20)) | (bbb !(\"nnn zzz\"~10 ddd)) | (ccc !\"nnn zzz\")",
			"( ( aaa AND NOT ( ccc \"nnn zzz\"~20 ) ) | ( bbb AND NOT ( \"nnn zzz\"~10 ddd ) ) | ( ccc AND NOT \"nnn zzz\" ) )",
			"( ( aaa AND NOT ccc ) | ( bbb AND NOT ddd ) | ( ( ccc | aaa | bbb ) AND NOT \"nnn zzz\"~20 ) )",
			( const CKeywordHits * ) &dPseudoHits[0]
		},
		// COMMON SUBTERM
		{
			"(aaa (nnn | ccc)) | (bbb (nnn | ddd))",
			"( ( aaa ( nnn | ccc ) ) | ( bbb ( nnn | ddd ) ) )",
			"( ( aaa ccc ) | ( bbb ddd ) | ( ( aaa | bbb ) nnn ) )",
			( const CKeywordHits * ) &dPseudoHits[0]
		},
		{
			"(aaa (ccc | nnn)) | (bbb (nnn | ddd)) | (ccc | nnn)",
			"( ( aaa ( ccc | nnn ) ) | ( bbb ( nnn | ddd ) ) | ( ccc | nnn ) )",
			"( ( aaa ccc ) | ( bbb ddd ) | ccc | nnn | ( ( aaa | bbb ) nnn ) )",
			( const CKeywordHits * ) &dPseudoHits[0]
		},
		{
			"(aaa (ccc | nnn)) | (bbb (nnn | ddd))",
			"( ( aaa ( ccc | nnn ) ) | ( bbb ( nnn | ddd ) ) )",
			"( ( aaa ( ccc | nnn ) ) | ( bbb ( nnn | ddd ) ) )",
			( const CKeywordHits * ) &dPseudoHits[1]
		},
		// COMMON SUBTERM WITH MIXED PHRASES/PROXIMITY terms
		{
			"(aaa (ccc | \"qqq www\"~10)) | (bbb (\"qqq www\" | ddd)) | (ccc | \"qqq www\"~20)",
			"( ( aaa ( ccc | \"qqq www\"~10 ) ) | ( bbb ( \"qqq www\" | ddd ) ) | ( ccc | \"qqq www\"~20 ) )",
			"( ( aaa ccc ) | ( bbb ddd ) | ccc | \"qqq www\"~20 | ( ( aaa | bbb ) \"qqq www\"~10 ) )",
			( const CKeywordHits * ) &dPseudoHits[2]
		},
		// COMMON KEYWORDS
		{
			"\"aaa bbb ccc ddd jjj\" | \"aaa bbb\"",
			"( \"aaa bbb ccc ddd jjj\" | \"aaa bbb\" )",
			"\"aaa bbb\"",
			NULL
		},
		{
			"bbb | \"aaa bbb ccc\"",
			"( bbb | \"aaa bbb ccc\" )",
			"bbb",
			NULL
		},
		{
			"\"aaa bbb ccc ddd jjj\" | \"bbb ccc\"",
			"( \"aaa bbb ccc ddd jjj\" | \"bbb ccc\" )",
			"\"bbb ccc\"",
			NULL
		},
		{
			"\"aaa bbb ccc ddd jjj\" | \"bbb jjj\"",
			"( \"aaa bbb ccc ddd jjj\" | \"bbb jjj\" )",
			"( \"aaa bbb ccc ddd jjj\" | \"bbb jjj\" )",
			NULL
		},
		// FIXME!!! add exact phrase elimination
		{
			"\"aaa bbb ccc\"~10 | \"aaa bbb ccc ddd\"~20 | \"aaa bbb ccc\"~10 | \"aaa bbb ccc\"~10",
			"( \"aaa bbb ccc\"~10 | \"aaa bbb ccc ddd\"~20 | \"aaa bbb ccc\"~10 | \"aaa bbb ccc\"~10 )",
			// "( \"aaa bbb ccc ddd\"~20 | \"aaa bbb ccc\"~10 )",
			"( \"aaa bbb ccc\"~10 | \"aaa bbb ccc ddd\"~20 | \"aaa bbb ccc\"~10 | \"aaa bbb ccc\"~10 )",
			NULL
		},
		{
			"\"aaa bbb ccc\"~10 | \"aaa bbb ccc ddd\"~10",
			"( \"aaa bbb ccc\"~10 | \"aaa bbb ccc ddd\"~10 )",
			"\"aaa bbb ccc\"~10",
			NULL
		},
		{
			"\"aaa bbb ccc\"~10 | \"aaa bbb ccc\"~10",
			"( \"aaa bbb ccc\"~10 | \"aaa bbb ccc\"~10 )",
			// "\"aaa bbb ccc\"~10",
			"( \"aaa bbb ccc\"~10 | \"aaa bbb ccc\"~10 )",
			NULL
		},
		{
			"\"aaa bbb ccc\"~10 | \"aaa bbb ccc\"~9",
			"( \"aaa bbb ccc\"~10 | \"aaa bbb ccc\"~9 )",
			// "\"aaa bbb ccc\"~10",
			"( \"aaa bbb ccc\"~10 | \"aaa bbb ccc\"~9 )",
			NULL
		},
		{
			"\"aaa bbb ccc ddd eee\" | \"bbb ccc ddd\"~10",
			"( \"aaa bbb ccc ddd eee\" | \"bbb ccc ddd\"~10 )",
			"\"bbb ccc ddd\"~10",
			NULL
		},
		{
			"\"bbb ccc ddd\"~10 | \"ccc ddd\" | \"aaa bbb\"",
			"( \"bbb ccc ddd\"~10 | \"ccc ddd\" | \"aaa bbb\" )",
			"( \"bbb ccc ddd\"~10 | \"ccc ddd\" | \"aaa bbb\" )",
			NULL
		},
		{
			"\"aaa bbb ccc ddd eee\" | \"bbb ccc ddd\"~10 | \"ccc ddd\" | \"aaa bbb\"",
			"( \"aaa bbb ccc ddd eee\" | \"bbb ccc ddd\"~10 | \"ccc ddd\" | \"aaa bbb\" )",
			"( \"bbb ccc ddd\"~10 | \"ccc ddd\" | \"aaa bbb\" )",
			NULL
		},
		{
			"aaa | \"aaa bbb\"~10 | \"aaa ccc\"",
			"( aaa | \"aaa bbb\"~10 | \"aaa ccc\" )",
			"aaa",
			NULL
		},
		// COMMON PHRASES
		{
			"\"aaa bbb ccc ddd\" | \"eee fff ccc ddd\"",
			"( \"aaa bbb ccc ddd\" | \"eee fff ccc ddd\" )",
			"( \"( \"aaa bbb\" | \"eee fff\" ) \"ccc ddd\"\" )",
			NULL
		},
		{
			"\"ccc ddd aaa bbb\" | \"ccc ddd eee fff\"",
			"( \"ccc ddd aaa bbb\" | \"ccc ddd eee fff\" )",
			"( \"\"ccc ddd\" ( \"aaa bbb\" | \"eee fff\" )\" )",
			NULL
		},
		{
			"\"aaa bbb ccc ddd\" | \"eee fff ccc ddd\" | \"jjj lll\"",
			"( \"aaa bbb ccc ddd\" | \"eee fff ccc ddd\" | \"jjj lll\" )",
			"( \"jjj lll\" | ( \"( \"aaa bbb\" | \"eee fff\" ) \"ccc ddd\"\" ) )",
			NULL
		},
		{
			"\"ccc ddd aaa bbb\" | \"ccc ddd eee fff\" | \"jjj lll\"",
			"( \"ccc ddd aaa bbb\" | \"ccc ddd eee fff\" | \"jjj lll\" )",
			"( \"jjj lll\" | ( \"\"ccc ddd\" ( \"aaa bbb\" | \"eee fff\" )\" ) )",
			NULL
		},
		{
			"\"aaa bbb ccc ddd xxx yyy zzz\" | \"eee fff ddd xxx yyy zzz\" | \"jjj lll\"",
			"( \"aaa bbb ccc ddd xxx yyy zzz\" | \"eee fff ddd xxx yyy zzz\" | \"jjj lll\" )",
			"( \"jjj lll\" | ( \"( \"aaa bbb ccc\" | \"eee fff\" ) \"ddd xxx yyy zzz\"\" ) )",
			NULL
		},
		{
			"\"ddd xxx yyy zzz aaa bbb\" | \"ddd xxx yyy zzz ccc eee fff\" | \"jjj lll\"",
			"( \"ddd xxx yyy zzz aaa bbb\" | \"ddd xxx yyy zzz ccc eee fff\" | \"jjj lll\" )",
			"( \"jjj lll\" | ( \"\"ddd xxx yyy zzz\" ( \"aaa bbb\" | \"ccc eee fff\" )\" ) )",
			NULL
		},
		{
			"\"xxx zzz ccc ddd\" | \"xxx zzz yyy jjj kkk\" | \"xxx zzz yyy mmm nnn\"",
			"( \"xxx zzz ccc ddd\" | \"xxx zzz yyy jjj kkk\" | \"xxx zzz yyy mmm nnn\" )",
			"( \"\"xxx zzz\" ( \"ccc ddd\" | \"yyy jjj kkk\" | \"yyy mmm nnn\" )\" )",
			NULL
		},
		{
			"\"aaa bbb ddd www xxx yyy zzz\" | \"aaa bbb eee www xxx yyy zzz\"",
			"( \"aaa bbb ddd www xxx yyy zzz\" | \"aaa bbb eee www xxx yyy zzz\" )",
			"( \"( \"aaa bbb ddd\" | \"aaa bbb eee\" ) \"www xxx yyy zzz\"\" )",
			NULL
		},
		{
			"\"www xxx yyy zzz ddd aaa bbb\" | \"www xxx yyy zzz eee aaa bbb\"",
			"( \"www xxx yyy zzz ddd aaa bbb\" | \"www xxx yyy zzz eee aaa bbb\" )",
			"( \"\"www xxx yyy zzz\" ( \"ddd aaa bbb\" | \"eee aaa bbb\" )\" )",
			NULL
		},
		{
			"\"xxx yyy zzz ddd\" | \"xxx yyy zzz eee\"",
			"( \"xxx yyy zzz ddd\" | \"xxx yyy zzz eee\" )",
			"( \"\"xxx yyy zzz\" ( ddd | eee )\" )",
			NULL
		},
		{
			"\"ddd xxx yyy zzz\" | \"eee xxx yyy zzz\"",
			"( \"ddd xxx yyy zzz\" | \"eee xxx yyy zzz\" )",
			"( \"( ddd | eee ) \"xxx yyy zzz\"\" )",
			NULL
		},
		// COMMON AND NOT FACTOR
		{
			"( aaa !xxx ) | ( aaa !yyy ) | ( aaa !zzz )",
			"( ( aaa AND NOT xxx ) | ( aaa AND NOT yyy ) | ( aaa AND NOT zzz ) )",
			"( aaa AND NOT ( xxx yyy zzz ) )",
			NULL
		},
		{
			"( aaa !xxx ) | ( aaa !yyy ) | ( aaa !zzz ) | ( bbb !xxx ) | ( bbb !yyy ) | ( bbb !zzz )",
			"( ( aaa AND NOT xxx ) | ( aaa AND NOT yyy ) | ( aaa AND NOT zzz ) | ( bbb AND NOT xxx ) | ( bbb AND NOT yyy ) | ( bbb AND NOT zzz ) )",
			"( ( aaa | bbb ) AND NOT ( xxx yyy zzz ) )",
			NULL
		},
		// COMMON AND NOT FACTOR WITH MIXED PHRASES/PROXIMITY terms
		{
			"( \"aaa bbb\"~10 !xxx ) | ( \"aaa bbb\"~20 !yyy ) | ( \"aaa bbb\" !zzz )",
			"( ( \"aaa bbb\"~10 AND NOT xxx ) | ( \"aaa bbb\"~20 AND NOT yyy ) | ( \"aaa bbb\" AND NOT zzz ) )",
			"( \"aaa bbb\"~20 AND NOT ( yyy xxx zzz ) )",
			NULL
		},
		// COMMON | NOT
		{
			"( aaa !(nnn | nnn1) ) | ( bbb !(nnn2 | nnn) )",
			"( ( aaa AND NOT ( nnn | nnn1 ) ) | ( bbb AND NOT ( nnn2 | nnn ) ) )",
			"( ( ( aaa AND NOT nnn1 ) | ( bbb AND NOT nnn2 ) ) AND NOT nnn )",
			NULL
		},
		// ExcessAndNot
		{
			"( (aaa ( ( ( (fff (xxx !hhh)) !kkk ) ) bbb !ccc)) !ddd ) ( ( (zzz (xxx !vvv)) !kkk ) )",
			"( ( aaa ( ( fff ( xxx AND NOT hhh ) bbb ) AND NOT ( kkk | ccc ) ) ( ( zzz ( xxx AND NOT vvv ) ) AND NOT kkk ) ) AND NOT ddd )",
			"( ( aaa fff xxx bbb zzz xxx ) AND NOT ( vvv | hhh | kkk | kkk | ccc | ddd ) )",
			NULL
		},
		// COMMON | NOT WITH MIXED PHRASES/PROXIMITY terms
		{
			"( aaa !( \"jjj kkk\"~10 | (aaa|nnn) ) ) | ( bbb !( fff | \"jjj kkk\" ) ) | ( ccc !( (hhh kkk) | \"jjj kkk\"~20 ) )",
			"( ( aaa AND NOT ( \"jjj kkk\"~10 | ( aaa | nnn ) ) ) | ( bbb AND NOT ( fff | \"jjj kkk\" ) ) | ( ccc AND NOT ( ( hhh kkk ) | \"jjj kkk\"~20 ) ) )",
			"( ( ( aaa AND NOT ( aaa | nnn ) ) | ( bbb AND NOT fff ) | ( ccc AND NOT ( hhh kkk ) ) ) AND NOT \"jjj kkk\"~20 )",
			NULL
		},
		// terminator entry
		{
			NULL, NULL, NULL, NULL
		}
	};
	CSphIndexSettings tTmpSettings;
	const QueryTest_t * pTest = dTest;
	while ( pTest->m_sQuery )
	{
		XQQuery_t tQuery;
		sphParseExtendedQuery ( tQuery, pTest->m_sQuery, nullptr, pTokenizer, &tSchema, pDict, tTmpSettings, nullptr );
		CSphString sReconst = sphReconstructNode ( tQuery.m_pRoot, &tSchema );
		// populate the dummy index with this case's pseudo frequencies
		CSphDummyIndex tIndex;
		if ( pTest->m_pKeywordHits )
		{
			for ( const CKeywordHits * pHits = pTest->m_pKeywordHits; pHits->m_sKeyword; ++pHits )
				EXPECT_TRUE ( tIndex.m_hHits.Add ( pHits->m_iHits, pHits->m_sKeyword ) );
		}
		// transform in place, then compare both reconstructions
		sphTransformExtendedQuery ( &tQuery.m_pRoot, tTmpSettings, true, &tIndex );
		CSphString sReconstTransformed = sphReconstructNode ( tQuery.m_pRoot, &tSchema );
		ASSERT_STREQ ( sReconst.cstr(), pTest->m_sReconst );
		ASSERT_STREQ ( sReconstTransformed.cstr(), pTest->m_sReconstTransformed );
		pTest++;
	}
}
TEST_F ( QueryParser, test_NOT )
{
	// a pure negation query reconstructs as empty unless the "only NOT" mode
	// is explicitly enabled via AllowOnlyNot()
	struct NotCase_t
	{
		const char * m_sQuery;
		const char * m_sReconst;
		bool m_bNotAllowed;
	};
	const NotCase_t dCases[] =
	{
		{ "-one", "", false },
		{ "-one", "( AND NOT one )", true },
	};
	for ( const NotCase_t & tCase : dCases )
	{
		AllowOnlyNot ( tCase.m_bNotAllowed );
		XQQuery_t tParsed;
		sphParseExtendedQuery ( tParsed, tCase.m_sQuery, nullptr, pTokenizer, &tSchema, pDict, tTmpSettings, nullptr );
		CSphString sRebuilt = sphReconstructNode ( tParsed.m_pRoot, &tSchema );
		ASSERT_STREQ ( sRebuilt.cstr(), tCase.m_sReconst );
	}
}
TEST ( Charsets, Merge_noIntersection )
{
	// two ranges that do not touch must come out of the merge unchanged
	CSphVector<RemapRangeTagged_t> dRanges;
	AddRange ( { 100, 200, 103 }, dRanges );
	AddRange ( { 201, 300, 120 }, dRanges );
	MergeIntersectedRanges ( dRanges );

	EXPECT_EQ ( dRanges.GetLength(), 2 );
	auto fnCheck = [&dRanges] ( int i, int iStart, int iEnd, int iRemapStart )
	{
		EXPECT_EQ ( dRanges[i].m_iStart, iStart );
		EXPECT_EQ ( dRanges[i].m_iEnd, iEnd );
		EXPECT_EQ ( dRanges[i].m_iRemapStart, iRemapStart );
	};
	fnCheck ( 0, 100, 200, 103 );
	fnCheck ( 1, 201, 300, 120 );
}
TEST ( Charsets, Merge_SameRange )
{
	// two identical source ranges collapse into one entry; the remap of the
	// later range (120) wins
	CSphVector<RemapRangeTagged_t> dRanges;
	AddRange ( { 100, 200, 103 }, dRanges );
	AddRange ( { 100, 200, 120 }, dRanges );
	MergeIntersectedRanges ( dRanges );

	EXPECT_EQ ( dRanges.GetLength(), 1 );
	EXPECT_EQ ( dRanges[0].m_iStart, 100 );
	EXPECT_EQ ( dRanges[0].m_iEnd, 200 );
	EXPECT_EQ ( dRanges[0].m_iRemapStart, 120 );
}
TEST ( Charsets, Merge_FirstEndLonger )
{
	// with a shared start, the range reaching further (end 200, remap 101)
	// absorbs the shorter one
	CSphVector<RemapRangeTagged_t> dRanges;
	AddRange ( { 100, 150, 102 }, dRanges );
	AddRange ( { 100, 200, 101 }, dRanges );
	MergeIntersectedRanges ( dRanges );

	EXPECT_EQ ( dRanges.GetLength(), 1 );
	EXPECT_EQ ( dRanges[0].m_iStart, 100 );
	EXPECT_EQ ( dRanges[0].m_iEnd, 200 );
	EXPECT_EQ ( dRanges[0].m_iRemapStart, 101 );
}
TEST ( Charsets, Merge_intersection )
{
	// partial overlap: the first range is truncated right before the start
	// of the second one
	CSphVector<RemapRangeTagged_t> dRanges;
	AddRange ( { 100, 150, 101 }, dRanges );
	AddRange ( { 110, 160, 102 }, dRanges );
	MergeIntersectedRanges ( dRanges );

	EXPECT_EQ ( dRanges.GetLength(), 2 );
	auto fnCheck = [&dRanges] ( int i, int iStart, int iEnd, int iRemapStart )
	{
		EXPECT_EQ ( dRanges[i].m_iStart, iStart );
		EXPECT_EQ ( dRanges[i].m_iEnd, iEnd );
		EXPECT_EQ ( dRanges[i].m_iRemapStart, iRemapStart );
	};
	fnCheck ( 0, 100, 109, 101 );
	fnCheck ( 1, 110, 160, 102 );
}
TEST ( Charsets, Merge_intersectionSameEnd )
{
	// overlap ending at the same point: the first range keeps only its
	// non-overlapped head
	CSphVector<RemapRangeTagged_t> dRanges;
	AddRange ( { 100, 150, 101 }, dRanges );
	AddRange ( { 110, 150, 102 }, dRanges );
	MergeIntersectedRanges ( dRanges );

	EXPECT_EQ ( dRanges.GetLength(), 2 );
	auto fnCheck = [&dRanges] ( int i, int iStart, int iEnd, int iRemapStart )
	{
		EXPECT_EQ ( dRanges[i].m_iStart, iStart );
		EXPECT_EQ ( dRanges[i].m_iEnd, iEnd );
		EXPECT_EQ ( dRanges[i].m_iRemapStart, iRemapStart );
	};
	fnCheck ( 0, 100, 109, 101 );
	fnCheck ( 1, 110, 150, 102 );
}
TEST ( Charsets, Merge_intersectionFullCover )
{
	// one range fully inside another: the outer range is split into head and
	// tail around the inner one, the tail continuing the outer mapping
	// ( 151 -> 101 + (151-100) = 152 )
	CSphVector<RemapRangeTagged_t> dRanges;
	AddRange ( { 100, 160, 101 }, dRanges );
	AddRange ( { 110, 150, 102 }, dRanges );
	MergeIntersectedRanges ( dRanges );

	EXPECT_EQ ( dRanges.GetLength(), 3 );
	auto fnCheck = [&dRanges] ( int i, int iStart, int iEnd, int iRemapStart )
	{
		EXPECT_EQ ( dRanges[i].m_iStart, iStart );
		EXPECT_EQ ( dRanges[i].m_iEnd, iEnd );
		EXPECT_EQ ( dRanges[i].m_iRemapStart, iRemapStart );
	};
	fnCheck ( 0, 100, 109, 101 );
	fnCheck ( 1, 110, 150, 102 );
	fnCheck ( 2, 151, 160, 152 );
}
TEST ( Charsets, ComplexMergeRanges )
{
	// a pile of deeply nested / overlapping ranges must flatten into 10
	// disjoint pieces with consistent remap offsets
	CSphVector<RemapRangeTagged_t> dRanges;
	AddRange ( { 100, 200, 103 }, dRanges );
	AddRange ( { 110, 190, 120 }, dRanges );
	AddRange ( { 120, 180, 121 }, dRanges );
	AddRange ( { 130, 170, 122 }, dRanges );
	AddRange ( { 140, 160, 123 }, dRanges );
	AddRange ( { 142, 158, 125 }, dRanges );
	AddRange ( { 144, 156, 127 }, dRanges );
	AddRange ( { 146, 170, 129 }, dRanges );
	AddRange ( { 150, 150, 124 }, dRanges );
	MergeIntersectedRanges ( dRanges );

	// expected disjoint pieces: { start, end, remap start }
	const struct { int m_iStart, m_iEnd, m_iRemapStart; } dExpected[] =
	{
		{ 100, 109, 103 },
		{ 110, 119, 120 },
		{ 120, 129, 121 },
		{ 130, 139, 122 },
		{ 140, 149, 123 },
		{ 150, 150, 124 },
		{ 151, 170, 134 },
		{ 171, 180, 172 },
		{ 181, 190, 191 },
		{ 191, 200, 194 },
	};

	EXPECT_EQ ( dRanges.GetLength(), 10 );
	for ( int i=0; i<10; ++i )
	{
		EXPECT_EQ ( dRanges[i].m_iStart, dExpected[i].m_iStart );
		EXPECT_EQ ( dRanges[i].m_iEnd, dExpected[i].m_iEnd );
		EXPECT_EQ ( dRanges[i].m_iRemapStart, dExpected[i].m_iRemapStart );
	}
}
| 47,288
|
C++
|
.cpp
| 1,190
| 37.039496
| 219
| 0.571957
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
16,994
|
gtests_searchd.cpp
|
manticoresoftware_manticoresearch/src/gtests/gtests_searchd.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include <gtest/gtest.h>
// simplest way to test searchd internals - include the source, suppress the main() function there.
#define SUPRESS_SEARCHD_MAIN 1
#include "searchd.cpp"
#if POLLING_EPOLL
// Exploratory test of epoll internals: documents (and pins down) the exact
// event flags the kernel reports through the lifecycle of a socket pair.
TEST ( searchd_stuff, epoll_behaviour )
{
	// create a socket pair and an epoll instance,
	// add the 'server' socket to the epoll instance,
	// prepare send and receive buffers.
	int dSockets[2] = { -1, -1 };
	auto iRes = socketpair ( AF_LOCAL, SOCK_STREAM, 0, dSockets );
	ASSERT_EQ ( iRes, 0 );
	auto iServer = dSockets[0];
	auto iClient = dSockets[1];
	int iPoll = epoll_create (2);
	epoll_event tEv = { 0 };
	tEv.events = EPOLLIN; // watch the server side for readability only
	iRes = epoll_ctl ( iPoll, EPOLL_CTL_ADD, iServer, &tEv );
	ASSERT_NE (iRes, -1);
	const char* pSrc = "hello";
	char receiver[10];
	// send 'hello' (6 bytes, including the terminator) into the client socket
	send ( iClient, pSrc, 6, MSG_NOSIGNAL | MSG_DONTWAIT );
	tEv = {0};
	iRes = epoll_wait ( iPoll, &tEv, 1, 1000);
	ASSERT_EQ (iRes, 1) << "Server socket received " << iRes << " events";
	// receive only 3 bytes (so 3 more left in buf).
	recv ( iServer, receiver, 3, MSG_WAITALL );
	receiver[3] = '\0';
	ASSERT_STREQ ( receiver, "hel");
	// now delete server from subscription.
	tEv = {0};
	iRes = epoll_ctl ( iPoll, EPOLL_CTL_DEL, iServer, &tEv );
	ASSERT_NE ( iRes, -1 );
	// assert that epoll returned nothing (3 bytes are still buffered, but we are unsubscribed)
	iRes = epoll_wait ( iPoll, &tEv, 1, 500 );
	ASSERT_EQ ( iRes, 0 );
	// subscribe server for output
	tEv = {0};
	tEv.events = EPOLLOUT;
	iRes = epoll_ctl ( iPoll, EPOLL_CTL_ADD, iServer, &tEv );	// NOTE(review): result not asserted
	// ensure it is ready for writing
	tEv = {0};
	iRes = epoll_wait ( iPoll, &tEv, 1, 1000 );
	ASSERT_EQ ( iRes, 1 );
	// send the same 'hello' from other end of the socket.
	send ( iServer, pSrc, 6, MSG_NOSIGNAL | MSG_DONTWAIT );
	// ensure it is still ready for writing (6 bytes definitely do not fill the whole buffer)
	tEv = { 0 };
	iRes = epoll_wait ( iPoll, &tEv, 1, 1000 );
	ASSERT_EQ ( iRes, 1 );
	ASSERT_EQ ( tEv.events, EPOLLOUT );
	// change the 'out' subscription to 'in'+'out'
	tEv.events = EPOLLIN | EPOLLOUT;
	iRes = epoll_ctl ( iPoll, EPOLL_CTL_MOD, iServer, &tEv );
	ASSERT_NE ( iRes, -1 );
	// ensure we can still read from the sock.
	tEv = { 0 };
	iRes = epoll_wait ( iPoll, &tEv, 1, 1000 );
	ASSERT_EQ ( iRes, 1 );
	ASSERT_EQ ( tEv.events, EPOLLIN | EPOLLOUT );
	// receive the rest of the originally sent buffer ("lo" + terminator).
	recv ( iServer, receiver, 3, MSG_NOSIGNAL | MSG_WAITALL );
	ASSERT_STREQ ( receiver, "lo" );
	// the gray area: socket still subscribed; we close it
	::close ( iClient );
	// ensure it is still ready for reading - EOF is there!
	tEv = { 0 };
	iRes = epoll_wait ( iPoll, &tEv, 1, 1000 );
	ASSERT_EQ ( iRes, 1 );
	ASSERT_EQ ( tEv.events, EPOLLHUP + EPOLLERR + EPOLLIN + EPOLLOUT);
	// here we have EPOLLHUP + EPOLLERR + EPOLLIN + EPOLLOUT
	// recv fails here; NOTE(review): -1 rather than a plain EOF of 0 -
	// presumably because the peer closed with unread data pending - confirm.
	iRes = recv ( iServer, receiver, 10, MSG_NOSIGNAL );
	ASSERT_EQ ( iRes, -1 );
	// now again, it will return 1. EPOLLERR went out.
	iRes = epoll_wait ( iPoll, &tEv, 1, 1000 );
	ASSERT_EQ ( iRes, 1 );
	ASSERT_EQ ( tEv.events, EPOLLHUP + EPOLLIN + EPOLLOUT );
	// this send should be impossible since other socket is closed.
	iRes = send ( iServer, pSrc, 6, MSG_NOSIGNAL | MSG_DONTWAIT );
	ASSERT_EQ ( iRes, -1 );
	// now again, it will return 1. EPOLLERR went out.
	iRes = epoll_wait ( iPoll, &tEv, 1, 1000 );
	ASSERT_EQ ( iRes, 1 );
	ASSERT_EQ ( tEv.events, EPOLLHUP + EPOLLIN + EPOLLOUT );
	// now close our side also...
	::close ( iServer );
	tEv = { 0 };
	iRes = epoll_wait ( iPoll, &tEv, 1, 500 );
	ASSERT_EQ ( iRes, 0 );
	// a closed fd is auto-removed from the epoll set, so an explicit DEL fails
	tEv = {0};
	iRes = epoll_ctl ( iPoll, EPOLL_CTL_DEL, iServer, &tEv );
	ASSERT_EQ ( iRes, -1 ) << "event auto-deleted";
	// ensure that _now_ we have nothing in epoll.
	tEv = { 0 };
	iRes = epoll_wait ( iPoll, &tEv, 1, 500 );
	ASSERT_EQ ( iRes, 0 );
	::close (iPoll);
}
#endif
TEST ( searchd_stuff, iovec_behaviour )
{
	// Build an IOVec_c over the chunks of a SmartOutputBuffer_t and check
	// that StepForward() drops entries only once a chunk is fully consumed.
	SmartOutputBuffer_t tBuf;
	IOVec_c tVec;

	// first chunk: one 4-byte dword
	tBuf.SendDword ( 0xAAAAAAAA );
	tVec.BuildFrom ( tBuf );

	// second chunk with another dword: two iovec entries now
	tBuf.StartNewChunk ();
	tBuf.SendDword ( 0xBBBBBBBB );
	tVec.BuildFrom ( tBuf );
	ASSERT_EQ ( tVec.IOSize (), 2 );

	// an empty trailing chunk adds no entry
	tBuf.StartNewChunk ();
	tVec.BuildFrom ( tBuf );
	ASSERT_EQ ( tVec.IOSize (), 2 );

	// 2 bytes into the first 4-byte chunk: both entries still present
	tVec.StepForward ( 2 );
	ASSERT_EQ ( tVec.IOSize (), 2 );

	// 2 more bytes finish the first chunk
	tVec.StepForward ( 2 );
	ASSERT_EQ ( tVec.IOSize (), 1 );

	// consuming the remaining 4 bytes drains the vector completely
	tVec.StepForward ( 4 );
	ASSERT_EQ ( tVec.IOSize (), 0 );
}
TEST ( searchd_stuff, prepare_emulation )
{
	// legacy SPH_MATCH_ALL mode must be emulated via the proximity ranker
	CSphQuery tQry;
	tQry.m_eMode = SPH_MATCH_ALL;
	PrepareQueryEmulation ( &tQry );
	ASSERT_EQ ( tQry.m_eRanker, SPH_RANK_PROXIMITY );
}
/// Fixture that preloads a NetPooller_c with three wakeup events; the tests
/// below verify that removing events while iterating via ProcessAll() is safe.
class CustomNetloop_c : public ::testing::Test
{
protected:
	void SetUp () override
	{
		m_pPoll = std::make_unique<NetPooller_c> ( 1000 );
		int64_t tmNow = sphMicroTimer ();
		// three identical events; individual tests then remove various subsets
		SetupEvent ( new CSphWakeupEvent, tmNow );
		SetupEvent ( new CSphWakeupEvent, tmNow );
		SetupEvent ( new CSphWakeupEvent, tmNow );
	}
	// drop (and release) whatever events the test left in the poller
	void TearDown () NO_THREAD_SAFETY_ANALYSIS override
	{
		m_pPoll->ProcessAll( [this] ( NetPollEvent_t * pWork ) NO_THREAD_SAFETY_ANALYSIS {
			m_pPoll->RemoveEvent ( pWork );
			SafeRelease ( pWork );
		} );
	}
	// register one action with the poller (tmNow is currently unused)
	void SetupEvent ( ISphNetAction * pWork, int64_t tmNow ) NO_THREAD_SAFETY_ANALYSIS
	{
		m_pPoll->SetupEvent ( pWork );
	}
	// Walk all pooled events and remove the ones at ordinal positions
	// iOutdate / iOutdate2 (as enumerated by ProcessAll); the returned
	// actions are owned by the caller and must be SafeRelease'd.
	CSphVector<ISphNetAction *> RemoveOutdated ( int iOutdate, int iOutdate2=-1 ) NO_THREAD_SAFETY_ANALYSIS
	{
		CSphVector<ISphNetAction *> dCleanup;
		int ev = -1;
		// remove outdated items on no signals
		m_pPoll->ProcessAll ( [&] ( NetPollEvent_t * pEvent ) NO_THREAD_SAFETY_ANALYSIS {
			++ev;
			if ( ev!=iOutdate && ev!=iOutdate2)
				return;
			m_pPoll->RemoveEvent ( pEvent );
			auto * pWork = (ISphNetAction *) pEvent;
			dCleanup.Add ( pWork );
		} );
		return dCleanup;
	}
	std::unique_ptr<NetPooller_c> m_pPoll;
};
TEST_F ( CustomNetloop_c, test_usual_remove_1st )
{
	// drop the first of the three registered events mid-iteration
	for ( ISphNetAction * pAction : RemoveOutdated ( 0 ) )
		SafeRelease ( pAction );
}
TEST_F ( CustomNetloop_c, test_usual_remove_2nd )
{
	// drop the middle one of the three registered events mid-iteration
	for ( ISphNetAction * pAction : RemoveOutdated ( 1 ) )
		SafeRelease ( pAction );
}
TEST_F ( CustomNetloop_c, test_usual_remove_3rd )
{
	// drop the last of the three registered events mid-iteration
	for ( ISphNetAction * pAction : RemoveOutdated ( 2 ) )
		SafeRelease ( pAction );
}
TEST_F ( CustomNetloop_c, test_usual_remove_12 )
{
	// drop the first two events in a single ProcessAll() pass
	for ( ISphNetAction * pAction : RemoveOutdated ( 0, 1 ) )
		SafeRelease ( pAction );
}
TEST_F ( CustomNetloop_c, test_usual_remove_23 )
{
	// drop the last two events in a single ProcessAll() pass
	for ( ISphNetAction * pAction : RemoveOutdated ( 1, 2 ) )
		SafeRelease ( pAction );
}
| 7,113
|
C++
|
.cpp
| 214
| 31.046729
| 104
| 0.681513
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,995
|
gtests_functions.cpp
|
manticoresoftware_manticoresearch/src/gtests/gtests_functions.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include <gtest/gtest.h>
#include "sphinxint.h"
#include "sphinxutils.h"
#include "json/cJSON.h"
#include "threadutils.h"
#include <cmath>
#include "histogram.h"
#include "conversion.h"
#include "digest_sha1.h"
// Miscellaneous short functional tests: TDigest, SpanSearch,
// stringbuilder, CJson, TaggedHash, Log2
//////////////////////////////////////////////////////////////////////////
/// Fixture for TDigest_c percentile tests.
class TDigest : public ::testing::Test
{
protected:
	void SetUp() override
	{
		// fixed seed - presumably the digest samples via sphRand() internally,
		// so reseeding makes the percentile estimates reproducible (TODO confirm)
		sphSrand ( 0 );
		pDigest = &tDigest;
	}
	TDigest_c tDigest;
	TDigest_c* pDigest;	// alias the tests call through; always &tDigest
};
TEST_F ( TDigest, simple )
{
	// values 1..100 with unit weights: percentile estimates should land
	// within 1 of their exact ranks
	for ( int iVal = 1; iVal<=100; ++iVal )
		pDigest->Add ( iVal, 1 );

	ASSERT_NEAR ( pDigest->Percentile ( 50 ), 51, 1 );
	ASSERT_NEAR ( pDigest->Percentile ( 95 ), 96, 1 );
	ASSERT_NEAR ( pDigest->Percentile ( 99 ), 100, 1 );
}
TEST_F ( TDigest, dupes )
{
	// 30000 samples: each value of 1..100 repeated 300 times; heavy
	// duplication must not skew the percentiles
	for ( int iPass = 0; iPass<3; ++iPass )
		for ( int i = 0; i<10000; ++i )
			pDigest->Add ( i / 100 + 1, 1 );

	ASSERT_NEAR ( pDigest->Percentile ( 50 ), 51, 1 );
	ASSERT_NEAR ( pDigest->Percentile ( 95 ), 96, 1 );
	ASSERT_NEAR ( pDigest->Percentile ( 99 ), 100, 1 );
}
TEST_F ( TDigest, compression )
{
	// 10000 distinct values force internal centroid compression; estimates
	// may drift slightly more (tolerance 1.5 instead of 1)
	for ( int iVal = 0; iVal<10000; ++iVal )
		pDigest->Add ( iVal + 1, 1 );

	ASSERT_NEAR ( pDigest->Percentile ( 50 ), 5001, 1.5 );
	ASSERT_NEAR ( pDigest->Percentile ( 95 ), 9501, 1.5 );
	ASSERT_NEAR ( pDigest->Percentile ( 99 ), 9901, 1.5 );
}
//////////////////////////////////////////////////////////////////////////
TEST ( Misc, SpanSearch )
{
	// FindSpan() returns the index of the last element <= the probe, or -1
	// when the probe is below the first element (5 is the scan-threshold arg)
	CSphVector<int> dVec;
	for ( int iVal : { 1, 3, 4 } )
		dVec.Add ( iVal );

	ASSERT_EQ ( FindSpan ( dVec, 1, 5 ), 0 );
	ASSERT_EQ ( FindSpan ( dVec, 3, 5 ), 1 );
	ASSERT_EQ ( FindSpan ( dVec, 4, 5 ), 2 );
	ASSERT_EQ ( FindSpan ( dVec, 0, 5 ), -1 );
	ASSERT_EQ ( FindSpan ( dVec, 11, 5 ), 2 );

	// grow past the linear-scan threshold and re-check
	for ( int iVal : { 15, 17, 22, 23 } )
		dVec.Add ( iVal );

	ASSERT_EQ ( FindSpan ( dVec, 1, 5 ), 0 );
	ASSERT_EQ ( FindSpan ( dVec, 18, 5 ), 4 );
	ASSERT_EQ ( FindSpan ( dVec, 23, 5 ), 6 );
	ASSERT_EQ ( FindSpan ( dVec, 0, 5 ), -1 );
	ASSERT_EQ ( FindSpan ( dVec, 31, 5 ), 6 );
}
//////////////////////////////////////////////////////////////////////////
TEST( functions, TaggedHash20_t )
{
	// round-trip of a FIPS-style "<hex sha1> *<name>\n" line
	const char * sFIPS = "45f44fd2db02b08b4189abf21e90edd712c9616d *rt_full.ram\n";
	const HASH20_t dRefHash { 0x45, 0xf4, 0x4f, 0xd2, 0xdb, 0x02, 0xb0, 0x8b, 0x41, 0x89, 0xab, 0xf2, 0x1e, 0x90, 0xed, 0xd7, 0x12, 0xc9, 0x61, 0x6d };
	const char * sRefName = "rt_full.ram";

	// a hash constructed from a tag only serializes to an empty FIPS line
	TaggedHash20_t tHash ( "HelloFips" );
	EXPECT_TRUE ( tHash.ToFIPS ()=="" );

	// parsing the line fills both the tag name and the raw hash bytes...
	tHash.FromFIPS ( sFIPS );
	ASSERT_TRUE ( tHash.m_sTagName==sRefName );
	ASSERT_EQ ( tHash.m_dHashValue, dRefHash );

	// ...and serializing back reproduces the original line exactly
	ASSERT_TRUE ( tHash.ToFIPS ()==sFIPS );

	// constructing directly from name + bytes yields the same line too
	TaggedHash20_t tHash2 ( sRefName, dRefHash );
	ASSERT_TRUE ( tHash2.ToFIPS ()==sFIPS );
}
// CalcSHA1 must produce the canonical lowercase-hex SHA1 digest
TEST ( functions, SHA1_hashing )
{
	auto sData = FromSz ( "bla-bla-bla" );
	auto sHash = CalcSHA1 ( sData.first, sData.second );
	ASSERT_STREQ ( sHash.cstr(), "1d537ba3814495b5be2c8f6537e4bd6764fcc9b4");
}
//////////////////////////////////////////////////////////////////////////
// Portable reference implementation for sphLog2: the number of bits needed
// to store x, with nlog2(0)==nlog2(1)==1 (zero still occupies one bit).
// Rewritten as a plain shift-count loop: results are identical to the old
// smear-then-popcount version for every input, but the helper no longer
// depends on the project's sphBitCount and can be unit-tested in isolation.
unsigned int nlog2 ( uint64_t x )
{
	unsigned int uBits = 0;
	do
	{
		++uBits;      // count one bit per shift; the do-while makes 0 -> 1
		x >>= 1;
	} while ( x );
	return uBits;
}
// sphLog2(x) returns the number of bits needed to store x (0 and 1 both take
// one bit); nlog2 above is its portable emulation. Run both functions over a
// single shared probe table instead of two hand-maintained EXPECT lists —
// the old lists had drifted: the final line of the nlog2 sequence re-checked
// sphLog2(0x7fffffffffffffff) instead of nlog2, leaving that probe untested.
TEST ( functions, Log2 )
{
	static const struct { uint64_t uVal; unsigned int uBits; } dProbes[] =
	{
		{ 0, 1 }, { 1, 1 }, { 2, 2 }, { 3, 2 },
		{ 4, 3 }, { 5, 3 }, { 6, 3 }, { 7, 3 },
		{ 8, 4 }, { 9, 4 }, { 10, 4 },
		{ 65535, 16 }, { 65536, 17 },
		{ 0xffffffffULL, 32 },
		{ 0x100000000ULL, 33 }, { 0x100000001ULL, 33 }, { 0x1ffffffffULL, 33 },
		{ 0x200000000ULL, 34 },
		{ 0xffffffffffffffffULL, 64 }, { 0xfffffffffffffffeULL, 64 },
		{ 0xefffffffffffffffULL, 64 }, { 0x7fffffffffffffffULL, 63 },
	};
	for ( const auto & tProbe : dProbes )
	{
		EXPECT_EQ ( (unsigned int)sphLog2 ( tProbe.uVal ), tProbe.uBits ) << "sphLog2(" << tProbe.uVal << ")";
		EXPECT_EQ ( nlog2 ( tProbe.uVal ), tProbe.uBits ) << "nlog2(" << tProbe.uVal << ")";
	}
}
//////////////////////////////////////////////////////////////////////////
// VBE encoding stores 7 payload bits per byte, so a value with N significant
// bits zips into ceil(N/7) bytes. Instead of ~80 hand-enumerated
// expectations, probe every power of two and the value just below it — this
// hits each byte-length boundary exactly and covers a strict superset of the
// old list, with a failure message that names the offending bit position.
TEST ( functions, sphCalcZippedLen )
{
	EXPECT_EQ ( sphCalcZippedLen ( 0 ), 1 ); // zero still takes one byte
	for ( int iBit = 0; iBit<64; ++iBit )
	{
		const uint64_t uVal = 1ULL << iBit;
		// 1<<iBit has iBit+1 significant bits -> iBit/7+1 bytes
		EXPECT_EQ ( sphCalcZippedLen ( uVal ), iBit / 7 + 1 ) << "at bit " << iBit;
		// (1<<iBit)-1 has iBit significant bits -> ceil(iBit/7) bytes, min 1
		EXPECT_EQ ( sphCalcZippedLen ( uVal - 1 ), iBit ? ( iBit + 6 ) / 7 : 1 ) << "below bit " << iBit;
	}
	EXPECT_EQ ( sphCalcZippedLen ( 0xFFFFFFFFFFFFFFFF ), 10 ); // all 64 bits -> 10 bytes
}
//////////////////////////////////////////////////////////////////////////
CSphMutex g_Mutex1; // held by the main thread while the worker probes it
// worker body: the main thread holds g_Mutex1 for ~3.5s, so three 1-second
// TimedLock attempts must time out and the fourth must succeed
void TimedLockTest ()
{
	ASSERT_FALSE ( g_Mutex1.TimedLock ( 1000 ) ) << "timedlock attempt 1";
	ASSERT_FALSE ( g_Mutex1.TimedLock ( 1000 ) ) << "timedlock attempt 2";
	ASSERT_FALSE ( g_Mutex1.TimedLock ( 1000 ) ) << "timedlock attempt 3";
	ASSERT_TRUE ( g_Mutex1.TimedLock ( 1000 ) ) << "timedlock attempt 4";
	ASSERT_TRUE ( g_Mutex1.Unlock () );
}
// lock the mutex, let TimedLockTest fail three timed attempts during the
// 3.5s sleep, then release so its fourth attempt succeeds.
// NOTE(review): timing-based — may flake on heavily loaded machines.
TEST (functions, Mutex)
{
	SphThread_t th;
	ASSERT_TRUE ( g_Mutex1.Lock () ) << "locked";
	ASSERT_TRUE ( Threads::Create ( &th, TimedLockTest ) ) << "timedlock thread created";
	sphSleepMsec ( 3500 );
	ASSERT_TRUE ( g_Mutex1.Unlock () ) << "unlocked";
	ASSERT_TRUE ( Threads::Join ( &th ) ) << "timedlock thread done";
}
//////////////////////////////////////////////////////////////////////////
// Fixture for the varint codec tests: zips 65 sliding windows of a 64-bit
// pattern (and their 32-bit truncations) into four buffers — big/little
// endian, 32/64 bit — which the TEST_Fs below must decode back verbatim.
class TZip: public ::testing::Test
{
protected:
	void SetUp() override
	{
		uint64_t uBase = 0xDEADBEAF12345678;
		for ( auto i = 0; i < 65; ++i )
		{
			// NB: shifting a 64-bit value by 64 is undefined behavior in C++
			// (and on x86 actually yields the value unchanged), so the final
			// fully-shifted-out iteration is special-cased to the intended 0
			uint64_t c64 = ( i<64 ) ? ( uBase >> i ) : 0;
			dValues64.Add ( c64 );
			ZipValueBE ( [this] ( BYTE b ) { dBufBE64.Add ( b ); }, c64 );
			ZipValueLE ( [this] ( BYTE b ) { dBufLE64.Add ( b ); }, c64 );
			DWORD c32 = c64 & 0xFFFFFFFF;
			dValues32.Add ( c32 );
			ZipValueBE ( [this] ( BYTE b ) { dBufBE.Add ( b ); }, c32 );
			ZipValueLE ( [this] ( BYTE b ) { dBufLE.Add ( b ); }, c32 );
		}
	}
	CSphVector<DWORD> dValues32;    // source values, 32-bit truncations
	CSphVector<uint64_t> dValues64; // source values, full 64 bits
	CSphVector<BYTE> dBufBE;        // zipped 32-bit, big-endian
	CSphVector<BYTE> dBufLE;        // zipped 32-bit, little-endian
	CSphVector<BYTE> dBufBE64;      // zipped 64-bit, big-endian
	CSphVector<BYTE> dBufLE64;      // zipped 64-bit, little-endian
};
TEST_F ( TZip, BE32 )
{
	// decode the big-endian 32-bit stream; every value must round-trip
	const BYTE* pCur = dBufBE.begin();
	for ( int i = 0; i<dValues32.GetLength(); ++i )
		ASSERT_EQ ( dValues32[i], UnzipIntBE ( pCur ) );
}
TEST_F ( TZip, BE64 )
{
	// decode the big-endian 64-bit stream; every value must round-trip
	const BYTE* pCur = dBufBE64.begin();
	for ( int i = 0; i<dValues64.GetLength(); ++i )
		ASSERT_EQ ( dValues64[i], UnzipOffsetBE ( pCur ) );
}
TEST_F ( TZip, LE32 )
{
	// decode the little-endian 32-bit stream; every value must round-trip
	const BYTE* pCur = dBufLE.begin();
	for ( int i = 0; i<dValues32.GetLength(); ++i )
		ASSERT_EQ ( dValues32[i], UnzipIntLE ( pCur ) );
}
TEST_F ( TZip, LE64 )
{
	// decode the little-endian 64-bit stream; every value must round-trip
	const BYTE* pCur = dBufLE64.begin();
	for ( int i = 0; i<dValues64.GetLength(); ++i )
		ASSERT_EQ ( dValues64[i], UnzipOffsetLE ( pCur ) );
}
//////////////////////////////////////////////////////////////////////////
static int g_iRwlock;      // shared counter, guarded by g_tRwlock
static RwLock_t g_tRwlock;
// reader worker: snapshot the shared counter into *pArg under a read lock
void RwlockReader ( void * pArg )
{
	ASSERT_TRUE ( g_tRwlock.ReadLock () );
	sphSleepMsec ( 10 ); // widen the window so readers and writers overlap
	*( int * ) pArg = g_iRwlock;
	ASSERT_TRUE ( g_tRwlock.Unlock () );
}
// writer worker: bump the shared counter by the integer smuggled in through
// the void* argument, holding the write lock over the increment + sleep
void RwlockWriter ( void * pArg )
{
	ASSERT_TRUE ( g_tRwlock.WriteLock () );
	g_iRwlock += static_cast<int>(reinterpret_cast<intptr_t>(pArg));
	sphSleepMsec ( 3 );
	ASSERT_TRUE ( g_tRwlock.Unlock () );
}
// run NPAIRS readers and writers concurrently; the writers add 1..NPAIRS, so
// the final counter must equal the arithmetic-series sum NPAIRS*(NPAIRS+1)/2.
// What each reader saw is timing-dependent, so their sum is only recorded.
TEST ( functions, RWLock )
{
	const int NPAIRS = 10;
	SphThread_t dReaders[NPAIRS];
	SphThread_t dWriters[NPAIRS];
	int dRead[NPAIRS];
	g_iRwlock = 0;
	for ( int i = 0; i<NPAIRS; i++ )
	{
		ASSERT_TRUE ( Threads::Create ( &dReaders[i], [&,i] { RwlockReader ( &dRead[i] );} ));
		ASSERT_TRUE ( Threads::Create ( &dWriters[i], [&,i] { RwlockWriter ( reinterpret_cast<void *>(static_cast<intptr_t>(1 + i) ) );} ));
	}
	for ( int i = 0; i<NPAIRS; i++ )
	{
		ASSERT_TRUE ( Threads::Join ( &dReaders[i] ) );
		ASSERT_TRUE ( Threads::Join ( &dWriters[i] ) );
	}
	ASSERT_EQ ( g_iRwlock, NPAIRS * ( 1 + NPAIRS ) / 2 );
	int iReadSum = 0;
	for ( int i : dRead )
		iReadSum += i;
	RecordProperty ( "read_sum", iReadSum );
}
//////////////////////////////////////////////////////////////////////////
CSphAutoEvent g_multievent; // counting event: N sets allow N waits (see test comment below)
OneshotEvent_c g_oneevent;  // oneshot event: N sets collapse into a single wait
volatile int64_t tmNow;     // per-test start timestamp, used only for log readability
// milliseconds elapsed since the current test reset tmNow
int getms()
{
	return (sphMicroTimer () - tmNow)/1000;
}
// worker: consume exactly five events from g_multievent, blocking until each
// one arrives (four are pre-set by the test, the fifth is set late)
void AutoEventest ()
{
	printf("\n%d thread started", getms());
	for ( int i=0; i<5; ++i)
	{
		ASSERT_TRUE ( g_multievent.WaitEvent ()) << "WaitEvent";
		printf ( "\n%d B%d: %d-st event waited", getms (),i+1,i);
	}
}
// worker: consume five events with a 500ms timeout each, verify that a sixth
// wait times out after 200ms, then that two late sets are still delivered
void AutoEventestTimed ()
{
	printf ( "\n%d B1: started", getms ());
	for ( int i = 0; i<5; ++i )
	{
		ASSERT_TRUE ( g_multievent.WaitEvent (500)) << "WaitEvent";
		printf ( "\n%d B%d: %d-st event waited", getms (), i+2, i );
	}
	// the queue is drained now, so a short timed wait must fail
	ASSERT_FALSE ( g_multievent.WaitEvent ( 200 )) << "WaitEvent";
	printf ( "\n%d B7: one event timed-out", getms () );
	ASSERT_TRUE ( g_multievent.WaitEvent ( 500 )) << "WaitEvent";
	ASSERT_TRUE ( g_multievent.WaitEvent ( 500 )) << "WaitEvent";
	printf ( "\n%d B8: last 2 events succeeded", getms ());
}
// multievent - we can set it N times, and then it may be waited N times also, but N+1 will block
// NOTE(review): relies on real sleeps — timing-sensitive under load
TEST ( functions, MultiAutoEvent )
{
	tmNow = sphMicroTimer ();
	SphThread_t th;
	// set 4 events before event start the thread. Expect, it will be catched.
	printf ( "\n%d A1: set event 4 times a row", getms ());
	g_multievent.SetEvent ();
	g_multievent.SetEvent ();
	g_multievent.SetEvent ();
	g_multievent.SetEvent ();
	// now start the thread, it will receive events
	ASSERT_TRUE ( Threads::Create ( &th, AutoEventest )) << "autoevent thread created";
	printf ( "\n%d A2: created working thread", getms () );
	// sleep half-a-second and set last event.
	sphSleepMsec ( 500 );
	printf ( "\n%d A3: set event", getms () );
	g_multievent.SetEvent ();
	sphSleepMsec ( 100 );
	ASSERT_TRUE ( Threads::Join ( &th )) << "autoevent thread done";
}
// same as MultiAutoEvent, but the worker (AutoEventestTimed) uses timed
// waits: 4 pre-set events + 1 late one, then a deliberate timeout, then two
// final sets that must still be delivered within their 500ms windows
TEST ( functions, MultiAutoEventTimed )
{
	tmNow=sphMicroTimer ();
	SphThread_t th;
	// set 4 events before event start the thread. Expect, it will be catched.
	printf ( "\n%d 4 events set", getms () );
	g_multievent.SetEvent ();
	g_multievent.SetEvent ();
	g_multievent.SetEvent ();
	g_multievent.SetEvent ();
	// now start the thread, it will receive events
	ASSERT_TRUE ( Threads::Create ( &th, AutoEventestTimed ) ) << "autoeventtimed thread created";
	printf ( "\n%d A2: created working thread", getms () );
	// sleep half-a-second and set last event.
	sphSleepMsec ( 100 );
	printf ( "\n%d A3: set event", getms () );
	g_multievent.SetEvent ();
	sphSleepMsec ( 400 );
	printf ( "\n%d A4: set event twice", getms () );
	g_multievent.SetEvent ();
	g_multievent.SetEvent ();
	sphSleepMsec ( 100 );
	ASSERT_TRUE ( Threads::Join ( &th ) ) << "autoevent thread done";
}
// worker: wait twice on the oneshot event — the four pre-sets collapse into
// one signal, so the second wait blocks until the test sets it again
void OneshotEventTest()
{
	printf ( "\n%d thread started", getms ());
	for ( int i = 0; i<2; ++i )
	{
		ASSERT_TRUE ( g_oneevent.WaitEvent ()) << "WaitEvent";
		printf ( "\n%d thread %d-st event waited", getms (), i );
	}
}
// worker for the (currently disabled) timed oneshot scenario: three timed
// waits succeed, then a fourth must time out once the signal is consumed
void OneshotEventTestTimed()
{
	printf ( "\n%d B1: started", getms ());
	bool bRes=g_oneevent.WaitEvent ( 500 ); ASSERT_TRUE ( bRes ) << "WaitEvent";
	printf ( "\n%d B2: 1-st event returned %s", getms (), bRes?"true":"false" );
	bRes=g_oneevent.WaitEvent ( 500 ); ASSERT_TRUE ( bRes ) << "WaitEvent";
	printf ( "\n%d B3: 2-nd event returned %s", getms (), bRes?"true":"false" );
	bRes=g_oneevent.WaitEvent ( 500 ); ASSERT_TRUE ( bRes ) << "WaitEvent";
	printf ( "\n%d B4: 3-rd event returned %s", getms (), bRes?"true":"false" );
	sphSleepMsec ( 100 );
	bRes=g_oneevent.WaitEvent ( 500 ); ASSERT_FALSE ( bRes ) << "WaitEvent";
	printf ( "\n%d B5: 4-th event returned %s", getms (), bRes?"true":"false" );
}
// oneshot event - we can set it N times, but only once it waited, and then will block.
TEST ( functions, OneshotAutoEvent )
{
	tmNow = sphMicroTimer ();
	SphThread_t th;
	// set 4 events before event start the thread. Expect, it will be catched.
	printf ( "\n%d A1: set event 4 times a row", getms ());
	g_oneevent.SetEvent ();
	g_oneevent.SetEvent ();
	g_oneevent.SetEvent ();
	g_oneevent.SetEvent ();
	// now start the thread, it will receive events
	ASSERT_TRUE ( Threads::Create ( &th, OneshotEventTest )) << "autoevent thread created";
	printf ( "\n%d A2: created working thread", getms ());
	// sleep half-a-second and set last event.
	sphSleepMsec ( 500 );
	printf ( "\n%d A3: set event", getms ());
	g_oneevent.SetEvent ();
	sphSleepMsec ( 100 );
	ASSERT_TRUE ( Threads::Join ( &th )) << "autoevent thread done";
}
// oneshot event - we can set it N times, but only once it waited, and then will block.
/*TEST ( functions, DISABLED_OneshotAutoEventTimed )
{
tmNow=sphMicroTimer ();
SphThread_t th;
// set 4 events before event start the thread. Expect, it will be catched.
printf ( "\n%d A1: set event 4 times a row", getms () );
g_oneevent.SetEvent ();
g_oneevent.SetEvent ();
g_oneevent.SetEvent ();
g_oneevent.SetEvent ();
// now start the thread, it will receive events
ASSERT_TRUE ( Threads::Create ( &th, OneshotEventTestTimed ) ) << "autoevent thread created";
printf ( "\n%d A2: created working thread", getms () );
// sleep half-a-second and set last event.
sphSleepMsec ( 100 );
printf ( "\n%d A3: set event", getms () );
g_oneevent.SetEvent ();
sphSleepMsec ( 400 );
printf ( "\n%d A4: set event twice", getms () );
g_oneevent.SetEvent ();
g_oneevent.SetEvent ();
sphSleepMsec ( 100 );
ASSERT_TRUE ( Threads::Join ( &th ) ) << "autoevent thread done";
}*/
//////////////////////////////////////////////////////////////////////////
#ifdef _WIN32
#pragma warning(push) // store current warning values
#pragma warning(disable:4101)
#endif
// OpenHashTable_T basics: Acquire (insert-or-get), Find, Add and Delete,
// including correctness on deliberately colliding 64-bit keys
TEST ( functions, Hash_simple )
{
	// add and verify a couple keys manually
	OpenHashTable_T<int64_t, int> h;
	int &a = h.Acquire ( 123 );
	ASSERT_FALSE ( a ); // Acquire on a missing key yields a zero-initialized slot
	a = 1;
	int &b = h.Acquire ( 234 );
	ASSERT_FALSE ( b);
	b = 2;
	ASSERT_TRUE ( h.Find ( 123 ) );
	ASSERT_EQ ( *h.Find ( 123 ), 1 );
	ASSERT_TRUE ( h.Find ( 234 ) );
	ASSERT_EQ ( *h.Find ( 234 ), 2 );
	ASSERT_FALSE ( h.Find ( 345 ) );
	// add several pairs of colliding keys
	const int DUPES = 8;
	int64_t dupes[DUPES*2] = {
		54309970105, 55904555634,
		54386834629, 61870972983,
		54789062086, 8033211121,
		41888995393, 69125167042,
		18878807922, 3782313558,
		31939787707, 58687170065,
		36013093500, 57976719271,
		35732429300, 67391785901
	};
	// each even/odd pair above hashes to the same bucket under HashFunc_Int64_t
	HashFunc_Int64_t tHashFunc;
	for ( int i = 0; i<2 * DUPES; i++ )
	{
		ASSERT_EQ ( tHashFunc.GetHash ( dupes[i] ), tHashFunc.GetHash ( dupes[( i >> 1 ) << 1] ) );
		int &x = h.Acquire ( dupes[i] );
		ASSERT_FALSE ( x );
		x = 100 + i;
	}
	// verify that colliding keys hashed differently
	for ( int i = 0; i<2 * DUPES; i++ )
		ASSERT_EQ ( *h.Find ( dupes[i] ), 100 + i );
	// verify that Add() attempts fail
	for ( int i = 0; i<2 * DUPES; i++ )
		ASSERT_FALSE ( h.Add ( dupes[i], 567 ) );
	// delete every 1st colliding key
	for ( int i = 0; i<2 * DUPES; i += 2 )
		h.Delete ( dupes[i] );
	// verify that 1st colliding key got deleted
	for ( int i = 0; i<2 * DUPES; i += 2 )
		ASSERT_FALSE ( h.Find ( dupes[i] ) );
	// verify that 2nd colliding key still works ok
	for ( int i = 1; i<2 * DUPES; i += 2 )
		ASSERT_EQ ( *h.Find ( dupes[i] ), 100 + i );
}
#ifdef _WIN32
#pragma warning(pop) // restore warnings
#endif
TEST ( functions, HASH_randomized )
// big randomized test: reseeding with sphSrand(0) before every pass
// regenerates the exact same key sequence, so passes can insert, verify,
// delete and re-verify the same ~1M keys without storing them
{
	OpenHashTable_T<int64_t, int> h;
	const int NVALS = 996146; // 0.95f out of 1M
	// add N numbers
	sphSrand ( 0 );
	for ( int i = 0; i<NVALS; i++ )
	{
		uint64_t k = sphRand ();
		k = ( k << 32 ) + sphRand ();
		h.Acquire ( k ) = i;
	}
	// verify that everything looks up as expected
	sphSrand ( 0 );
	for ( int i = 0; i<NVALS; i++ )
	{
		uint64_t k = sphRand ();
		k = ( k << 32 ) + sphRand ();
		ASSERT_EQ ( h.Acquire ( k ), i );
	}
	// delete every 3rd number
	sphSrand ( 0 );
	for ( int i = 0; i<NVALS; i++ )
	{
		uint64_t k = sphRand ();
		k = ( k << 32 ) + sphRand ();
		if ( !( i % 3 ) )
			h.Delete ( k );
	}
	// verify that everything looks up as expected
	sphSrand ( 0 );
	for ( int i = 0; i<NVALS; i++ )
	{
		uint64_t k = sphRand ();
		k = ( k << 32 ) + sphRand ();
		if ( i % 3 )
			ASSERT_EQ ( *h.Find ( k ), i );
		else
			ASSERT_FALSE ( h.Find ( k ) );
	}
}
//////////////////////////////////////////////////////////////////////////
// sphSplit with an explicit delimiter set and with the default (whitespace)
TEST ( functions, string_split )
{
	StrVec_t dStr;
	// the embedded NUL terminates the C string, so "off" is never seen
	// and only two tokens come back
	sphSplit ( dStr, "test:me\0off\0", ":" );
	ASSERT_EQ ( dStr.GetLength (), 2 );
	ASSERT_STREQ ( dStr[0].cstr(),"test" );
	ASSERT_STREQ ( dStr[1].cstr(), "me" );
	dStr.Reset();
	// without a delimiter set, any whitespace splits and edges are trimmed
	sphSplit ( dStr, " white\tspace\rsplit\ntrying ");
	ASSERT_EQ ( dStr.GetLength (), 4 );
	ASSERT_STREQ ( dStr[0].cstr (), "white" );
	ASSERT_STREQ ( dStr[1].cstr (), "space" );
	ASSERT_STREQ ( dStr[2].cstr (), "split" );
	ASSERT_STREQ ( dStr[3].cstr (), "trying" );
	dStr.Reset();
	// a leading delimiter yields an empty first token; a trailing one yields nothing
	sphSplit ( dStr, ":start:finish:", ":" );
	ASSERT_EQ ( dStr.GetLength (), 3 );
	ASSERT_STREQ ( dStr[0].cstr (), "" );
	ASSERT_STREQ ( dStr[1].cstr (), "start" );
	ASSERT_STREQ ( dStr[2].cstr (), "finish" );
}
//////////////////////////////////////////////////////////////////////////
// Access/compare policy for sphSort over a strided array: each element is
// m_iStride DWORDs, the first DWORD is the sort key, and the key is kept in
// sync as a CRC32 of the remaining payload so the tests can verify that
// sorting moved whole elements, not just their keys.
struct TestAccCmp_fn
{
	typedef DWORD MEDIAN_TYPE;
	typedef DWORD * PTR_TYPE;
	int m_iStride; // DWORDs per element
	explicit TestAccCmp_fn ( int iStride )
		: m_iStride ( iStride )
	{}
	// sort key is the element's leading DWORD
	DWORD Key ( DWORD * pData ) const
	{
		return *pData;
	}
	void CopyKey ( DWORD * pMed, DWORD * pVal ) const
	{
		*pMed = Key ( pVal );
	}
	bool IsLess ( DWORD a, DWORD b ) const
	{
		return a<b;
	}
	// swap two whole elements, all m_iStride DWORDs
	void Swap ( DWORD * a, DWORD * b ) const
	{
		for ( int i = 0; i<m_iStride; i++ )
			::Swap ( a[i], b[i] );
	}
	// strided pointer arithmetic: element i lives at p + i*m_iStride
	DWORD * Add ( DWORD * p, int i ) const
	{
		return p + i * m_iStride;
	}
	int Sub ( DWORD * b, DWORD * a ) const
	{
		return ( int ) ( ( b - a ) / m_iStride );
	}
	// true when the stored key still matches the payload checksum
	bool IsKeyDataSynced ( const DWORD * pData ) const
	{
		DWORD uKey = *pData;
		DWORD uHash = GenerateKey ( pData );
		return uKey==uHash;
	}
	// key = CRC32 of the payload DWORDs; degenerates to the value itself at stride 1
	DWORD GenerateKey ( const DWORD * pData ) const
	{
		return m_iStride>1 ? sphCRC32 ( pData + 1, ( m_iStride - 1 ) * 4 ) : ( *pData );
	}
};
// check that the strided array is ordered by key AND that every element's
// key still matches its payload checksum (i.e. elements moved as a whole)
static bool IsSorted ( DWORD * pData, int iCount, const TestAccCmp_fn &fn )
{
	// empty arrays are trivially sorted
	if ( iCount<1 )
		return true;

	const DWORD * pPrev = pData;
	if ( !fn.IsKeyDataSynced ( pPrev ) )
		return false;

	// the loop simply never runs for a single element
	for ( int i = 1; i<iCount; ++i )
	{
		const DWORD * pCurr = fn.Add ( pData, i );
		const bool bOrdered = !fn.IsLess ( *pCurr, *pPrev );
		if ( !bOrdered || !fn.IsKeyDataSynced ( pCurr ) )
			return false;
		pPrev = pCurr;
	}
	return true;
}
// fill iCount strided elements with random payloads; with bChainsaw set,
// every odd element derives its payload by halving its predecessor's values
// (a "sawtooth" of correlated neighbors). The leading DWORD of each element
// is then rewritten with the checksum key that IsSorted expects.
void RandomFill ( DWORD * pData, int iCount, const TestAccCmp_fn &fn, bool bChainsaw )
{
	for ( int i = 0; i<iCount; ++i )
	{
		DWORD * pCurr = fn.Add ( pData, i );
		const DWORD * pNext = fn.Add ( pData, i + 1 ); // one-past-the-end sentinel for this element
		DWORD * pElem = pCurr;
		DWORD * pChainHill = bChainsaw && ( i % 2 ) ? fn.Add ( pData, i - 1 ) : NULL;
		do
		{
			*pElem = pChainHill ? *pChainHill / 2 : sphRand ();
			++pElem;
			pChainHill = pChainHill ? pChainHill + 1 : pChainHill;
		} while ( pElem!=pNext );
		// stamp the key last, after the payload is final
		*pCurr = fn.GenerateKey ( pCurr );
	}
}
void TestStridedSortPass ( int iStride, int iCount )
{
ASSERT_TRUE ( iStride );
ASSERT_TRUE ( iCount );
DWORD * pData = new DWORD[iCount * iStride];
ASSERT_TRUE ( pData );
// checked elements are random
memset ( pData, 0, sizeof ( DWORD ) * iCount * iStride );
TestAccCmp_fn fnSort ( iStride );
RandomFill ( pData, iCount, fnSort, false );
// crash on sort of mini-arrays
TestAccCmp_fn fnSortDummy ( 1 );
DWORD dMini[1] = { 1 };
sphSort ( dMini, 1, fnSortDummy, fnSortDummy );
sphSort ( dMini, 0, fnSortDummy, fnSortDummy );
ASSERT_TRUE ( IsSorted ( dMini, 1, fnSortDummy ) );
// random sort
sphSort ( pData, iCount, fnSort, fnSort );
ASSERT_TRUE ( IsSorted ( pData, iCount, fnSort ) );
// already sorted sort
sphSort ( pData, iCount, fnSort, fnSort );
ASSERT_TRUE ( IsSorted ( pData, iCount, fnSort ) );
// reverse order sort
for ( int i = 0; i<iCount; ++i )
{
::Swap ( pData[i], pData[iCount - i - 1] );
}
sphSort ( pData, iCount, fnSort, fnSort );
ASSERT_TRUE ( IsSorted ( pData, iCount, fnSort ) );
// random chainsaw sort
RandomFill ( pData, iCount, fnSort, true );
sphSort ( pData, iCount, fnSort, fnSort );
ASSERT_TRUE ( IsSorted ( pData, iCount, fnSort ) );
SafeDeleteArray ( pData );
}
// drive TestStridedSortPass across fixed and random stride/count combos.
// Fixed: SCOPED_TRACE stays active until the end of its enclosing scope, so
// the old flat list stacked every earlier label onto every later failure
// message; each trace now lives in its own block and labels only its pass.
TEST ( functions, StridedSort )
{
	{ SCOPED_TRACE ( "stride 1, count 2" ); TestStridedSortPass ( 1, 2 ); }
	{ SCOPED_TRACE ( "stride 3, count 2" ); TestStridedSortPass ( 3, 2 ); }
	{ SCOPED_TRACE ( "stride 37, count 2" ); TestStridedSortPass ( 37, 2 ); }

	// SMALL_THRESH case
	{ SCOPED_TRACE ( "stride 1, count 30" ); TestStridedSortPass ( 1, 30 ); }
	{ SCOPED_TRACE ( "stride 7, count 13" ); TestStridedSortPass ( 7, 13 ); }
	{ SCOPED_TRACE ( "stride 113, count 5" ); TestStridedSortPass ( 113, 5 ); }

	{ SCOPED_TRACE ( "stride 1, count 1000" ); TestStridedSortPass ( 1, 1000 ); }
	{ SCOPED_TRACE ( "stride 5, count 1000" ); TestStridedSortPass ( 5, 1000 ); }
	{ SCOPED_TRACE ( "stride 17, count 50" ); TestStridedSortPass ( 17, 50 ); }
	{ SCOPED_TRACE ( "stride 31, count 1367" ); TestStridedSortPass ( 31, 1367 ); }

	// rand cases
	{
		SCOPED_TRACE ( "random strides" );
		for ( int i = 0; i<10; ++i )
		{
			const int iRndStride = sphRand () % 64;
			const int iNrmStride = Max ( iRndStride, 1 );
			const int iRndCount = sphRand () % 1000;
			const int iNrmCount = Max ( iRndCount, 1 );
			TestStridedSortPass ( iNrmStride, iNrmCount );
		}
	}
}
TEST ( functions, StridedSort_regressions )
{
	// regression of uniq vs empty array
	DWORD dUniq[] = { 1, 1, 3, 1 };
	int iCount = sizeof ( dUniq ) / sizeof ( dUniq[0] );
	ASSERT_FALSE ( sphUniq ( dUniq, 0 ) ); // zero-length input yields zero uniques
	// sphUniq requires sorted input: {1,1,1,3} collapses to {1,3}
	sphSort ( dUniq, iCount );
	ASSERT_EQ ( sphUniq ( dUniq, iCount ), 2);
	ASSERT_EQ ( dUniq[0], 1);
	ASSERT_EQ ( dUniq[1], 3 );
	// CSphVector::Uniq must also be a no-op on an empty vector
	CSphVector<DWORD> dUniq1;
	dUniq1.Uniq ();
	ASSERT_FALSE ( dUniq1.GetLength () );
	dUniq1.Add ( 1 );
	dUniq1.Add ( 3 );
	dUniq1.Add ( 1 );
	dUniq1.Add ( 1 );
	dUniq1.Uniq (); // unlike raw sphUniq, Uniq() handles unsorted input
	ASSERT_EQ ( dUniq1.GetLength (), 2 );
	ASSERT_EQ ( dUniq1[0], 1 );
	ASSERT_EQ ( dUniq1[1], 3 );
}
//////////////////////////////////////////////////////////////////////////
// smoke-test CSphWriter with the default buffer and with a buffer sized
// exactly as the payload (exercising the flush-at-boundary path).
// Fixed: the 256KB scratch buffer was a raw new[]/delete[] pair — now RAII,
// so nothing leaks if anything between allocation and cleanup throws.
TEST ( functions, Writer )
{
	const CSphString sTmpWriteout = "__writeout.tmp";
	CSphString sErr;

	static const auto WRITE_OUT_DATA_SIZE = 0x40000;
	CSphFixedVector<BYTE> dData ( WRITE_OUT_DATA_SIZE );
	memset ( dData.Begin(), 0xfe, WRITE_OUT_DATA_SIZE );
	{
		CSphWriter tWrDef; // default buffer size
		tWrDef.OpenFile ( sTmpWriteout, sErr );
		tWrDef.PutBytes ( dData.Begin(), WRITE_OUT_DATA_SIZE );
		tWrDef.PutByte ( 0xff ); // one extra byte past the bulk write
		tWrDef.CloseFile();
	}
	{
		CSphWriter tWr; // buffer sized exactly as the bulk payload
		tWr.SetBufferSize ( WRITE_OUT_DATA_SIZE );
		tWr.OpenFile ( sTmpWriteout, sErr );
		tWr.PutBytes ( dData.Begin(), WRITE_OUT_DATA_SIZE );
		tWr.PutByte ( 0xff );
		tWr.CloseFile();
	}
	unlink ( sTmpWriteout.cstr () );
}
//////////////////////////////////////////////////////////////////////////
// one rebalancing fixture entry: wold = input weight, utimer = measured
// timer, wnew = the weight expected back from RebalanceWeights
struct tstcase { float wold; DWORD utimer; float wnew; };
// run RebalanceWeights over iLen/iStride packed cases of iStride agents each
// and compare every resulting weight against the expectation (0.01 epsilon)
static void TestRebalance_fn ( tstcase * pData, int iLen, int iStride )
{
	ASSERT_FALSE ( iLen % iStride ); // the flat array must hold whole cases
	iLen /= iStride;
	CSphFixedVector<int64_t> dTimers ( iStride );
	CSphFixedVector<float> dWeights ( iStride );
	for ( int i = 0; i<iLen; ++i )
	{
		for ( int j = 0; j<iStride; ++j )
		{
			dWeights[j] = pData[i * iStride + j].wold;
			dTimers[j] = pData[i * iStride + j].utimer;
		}
		RebalanceWeights ( dTimers, dWeights );
		for ( int j = 0; j<iStride; ++j )
		{
			ASSERT_NEAR ( dWeights[j], pData[i * iStride + j].wnew, 0.01)
				<< " \n----dWeights[" << j << "]=" << dWeights[j] << " vs " << pData[i * iStride + j].wnew;
		}
	}
}
// RebalanceWeights fixtures: pairs/triples/quads of (old weight, timer,
// expected new weight), including degenerate cases with zero timers/weights
TEST ( functions, Rebalance )
{
	// old weights, timers, new weights
	tstcase dData1[] = { {50.5669f, 186751, 55.0625f}, {49.4316f, 228828, 44.9375f},
						 {55.6222f, 207608, 51.2823f}, {44.3763f, 218537, 48.7177f},
						 {56.8841f, 214800, 47.4951f}, {43.1144f, 194305, 52.5049f},
						 {54.4091f, 207614, 47.7932f}, {45.5894f, 190062, 52.2068f},
						 {52.2103f, 221708, 47.5706f}, {47.7882f, 201162, 52.4294f},
						 {49.7810f, 247379, 43.8821f}, {50.2174f, 193441, 56.1179f},
						 {43.6667f, 223202, 46.6167f}, {56.3317f, 194910, 53.3833f},
						 {40.3662f, 361018, 38.7370f}, {59.6323f, 228274, 61.2630f},
						 {29.9718f, 275050, 44.7756f}, {70.0267f, 223009, 55.2244f},
						 {25.7618f, 279008, 42.3951f}, {74.2367f, 205340, 57.6049f},
						 {20.3433f, 201466, 51.4136f}, {79.6551f, 213189, 48.5864f},
						 {21.2741f, 197584, 51.5511f}, {78.7243f, 210235, 48.4489f},
						 {25.3498f, 318349, 39.5014f}, {74.6487f, 207860, 60.4986f},
						 {18.1476f, 487120, 29.5299f}, {81.8509f, 204124, 70.4701f},
						 {08.5008f, 412733, 32.9526f}, {91.4977f, 202851, 67.0474f} };
	TestRebalance_fn ( dData1, sizeof(dData1) / sizeof( tstcase), 2 );
	// a dead agent (zero timer) keeps zero weight; the live one takes 100%
	tstcase dData2[] = { { 0.000000f, 0, 0.00000f }, { 00.0015f, 18469, 100.0000f } };
	TestRebalance_fn ( dData2, sizeof(dData2) / sizeof( tstcase), 2 );
	tstcase dData3[] = { { 0.000000f, 0, 0.00000f }, { 0.0015f, 0, 0.00000f }
						 , { 0.0031f, 0, 0.00000f }, { 0.0046f, 18469, 100.0000f } };
	TestRebalance_fn ( dData3, sizeof ( dData3 ) / sizeof ( tstcase ), 4 );
	tstcase dData4[] = { { 0.000000f, 7100, 72.2320f }, { 0.0015f, 0, 0.0f }, { 0.0031f, 18469, 27.7679f } };
	TestRebalance_fn ( dData4, sizeof ( dData4 ) / sizeof ( tstcase ), 3 );
}
//////////////////////////////////////////////////////////////////////////
// parsing size - number with possible suffixes k, m, g, t.
// parsing size - number with possible suffixes k, m, g, t.
TEST (functions, size_parser)
{
	// upper case suffixes
	ASSERT_EQ ( 1024, sphGetSize64 ( "1K" ) );
	ASSERT_EQ ( 1024 * 1024, sphGetSize64 ( "1M" ) );
	ASSERT_EQ ( 1024 * 1024 * 1024, sphGetSize64 ( "1G" ) );
	ASSERT_EQ ( 1024ULL * 1024 * 1024 * 1024, sphGetSize64 ( "1T" ) );
	// lower case suffixes;
	// Untouched sError on success;
	char * sError = nullptr;
	ASSERT_EQ ( 1, sphGetSize64 ( "1", &sError ) ); // no suffix = plain bytes
	ASSERT_EQ ( sError, nullptr );
	ASSERT_EQ ( 1024, sphGetSize64 ( "1k", &sError ) );
	ASSERT_EQ ( sError, nullptr );
	ASSERT_EQ ( 1024 * 1024, sphGetSize64 ( "1m", &sError ) );
	ASSERT_EQ ( sError, nullptr );
	ASSERT_EQ ( 1024 * 1024 * 1024, sphGetSize64 ( "1g", &sError ) );
	ASSERT_EQ ( sError, nullptr );
	ASSERT_EQ ( 1024ULL * 1024 * 1024 * 1024, sphGetSize64 ( "1t", &sError ) );
	ASSERT_EQ ( sError, nullptr );
	// empty and null input strings fall back to the supplied default
	ASSERT_EQ ( 11, sphGetSize64 ( "", &sError, 11 ) );
	ASSERT_EQ ( sError, nullptr );
	ASSERT_EQ ( 12, sphGetSize64 ( nullptr, &sError, 12 ) );
	ASSERT_EQ ( sError, nullptr );
	// error handle for non-numeric: returns -1, sError points at the bad tail
	ASSERT_EQ ( -1, sphGetSize64 ( "abc", &sError ) );
	ASSERT_STREQ (sError,"abc");
	// error handle for numeric, but unknown suffix (=non-numeric)
	ASSERT_EQ ( -1, sphGetSize64 ( "10z", &sError ) );
	ASSERT_STREQ ( sError, "z" );
}
// parsing time - number with possible suffixes us, ms, s, m, h, d, w
TEST ( functions, sphGetTime64 )
{
static const struct
{ int64_t tm; const char* str; } models[] = {
{ 1, "1us" }, { 2, "2Usm" }, { 3, "3uS" }, { 4, "4US" }, // useconds
{ 1000, "1ms" }, { 2000, "2Ms" }, { 3000, "3mS" }, { 4000, "4MS" },// milliseconds
{ 1000000, "1" }, { 2000000, "2s" }, { 3000000, "3S" }, // seconds
{ 60000000, "1m" }, { 120000000, "2M" }, // minutes
{ 3600000000, "1h" }, { 36000000000, "10H" }, // hours
{ 24ULL * 3600000000, "1D" }, { 48ULL * 3600000000, "2d" }, // days
{ 7ULL * 24 * 3600000000, "1W" }, { 14ULL * 24 * 3600000000, "2w" }, // weeks
};
for ( const auto& model : models )
EXPECT_EQ ( model.tm, sphGetTime64 (model.str) ) << "for " << model.tm << " and " << model.str;
}
// Untouched sError on success;
TEST ( functions, sphGetTime64_nullerror )
{
static const struct
{ int64_t tm; const char* str; } models[] = {
{ 1000000, "1" }, { 2, "2us" }, { 1000000, "1s" }, { 60000000, "1m" },
{ 3600000000, "1h" }, { 24ULL * 3600000000, "1d" }, { 7ULL * 24 * 3600000000, "1w" },
};
char* sError = nullptr;
for ( const auto& model : models ) {
EXPECT_EQ ( model.tm, sphGetTime64 ( model.str, &sError )) << "for " << model.tm << " and " << model.str;
EXPECT_EQ ( sError, nullptr ) << "for " << model.tm << " and " << model.str;
}
}
// empty and null input strings
TEST ( functions, sphGetTime64_defaults )
{
char* sError = nullptr;
ASSERT_EQ ( 11, sphGetTime64 ( "", &sError, 11 ));
ASSERT_EQ ( sError, nullptr );
ASSERT_EQ ( 12, sphGetTime64 ( nullptr, &sError, 12 ));
ASSERT_EQ ( sError, nullptr );
}
// processing errors
TEST ( functions, sphGetTime64_errors )
{
static const struct
{ int64_t res; const char* str; const char* err;} models[] = {
{ -1, "abc", "abc" }, // error handle for non-numeric
{ -1, "10z", "z" }, // error handle for numeric, but unknown suffix (=non-numeric)
};
char* sError = nullptr;
for ( const auto& model : models ) {
EXPECT_EQ ( model.res, sphGetTime64 ( model.str, &sError ));
EXPECT_STREQ ( sError, model.err ) << "for " << model.res << " and " << model.str << " err " << model.err;
}
}
// SmallStringHash_T iteration must preserve insertion order.
// NOTE(review): both loops below are identical ranged-for loops — the first
// was presumably iterator-based before a refactor; one of them is redundant.
TEST ( functions, hashmap_iterations )
{
	struct
	{
		int iVal;
		const char * sKey;
	} tstvalues[] =
		{ {   1, "one" }
		  , { 2, "two" }
		  , { 3, "three" }
		  , { 4, "four" } };
	SmallStringHash_T<int> tHash;
	for ( auto &test: tstvalues )
		tHash.Add ( test.iVal, test.sKey );
	auto i = 0;
	for ( const auto& tValue : tHash )
	{
		EXPECT_STREQ ( tValue.first.cstr (), tstvalues[i].sKey );
		EXPECT_EQ ( tValue.second, tstvalues[i].iVal );
		++i;
	}
	// test ranged-for iterations
	i = 0;
	for ( const auto& mp : tHash )
	{
		EXPECT_STREQ ( mp.first.cstr (), tstvalues[i].sKey );
		EXPECT_EQ ( mp.second, tstvalues[i].iVal );
		++i;
	}
}
// CSphVector Add/RemoveValue interplay: ten adds minus two removed values
// (2 and 10) leave eight elements (the second RemoveValue(9) at the tail and
// the re-added 9 cancel out)
TEST ( functions, vector )
{
	CSphVector<int> dVec;
	dVec.Add(1);
	dVec.Add(2);
	auto & dv = dVec.Add(); // Add() with no args returns a writable slot
	dv = 3;
	dVec.Add(4);
	dVec.Add ( 5 );
	dVec.Add ( 6 );
	dVec.Add ( 7 );
	dVec.RemoveValue (2);
	dVec.Add ( 8 );
	dVec.Add ( 9 );
	dVec.RemoveValue ( 9); // removing the last element
	dVec.Add ( 9 );
	dVec.Add ( 10);
	dVec.RemoveValue ( 10 );
	ASSERT_EQ (dVec.GetLength (),8);
}
// CSphVector::Slice: out-of-range start/length are clamped to the vector's
// bounds, and the slice aliases the vector's own storage (no copy)
TEST ( functions, vector_slice )
{
	CSphVector<int> dVec;
	auto dSlice0 = dVec.Slice();
	ASSERT_TRUE ( dSlice0.IsEmpty() );
	dVec.Add ( 1 );
	dVec.Add ( 2 );
	dVec.Add ( 3 );
	// negative start and oversized length clamp to the full vector
	auto dSlice1 = dVec.Slice(-1,20);
	ASSERT_EQ ( dSlice1.begin(), dVec.begin());
	ASSERT_EQ ( dSlice1.GetLength (), 3 );
	// start past the end yields an empty slice
	auto dSlice2 = dVec.Slice (10,10);
	ASSERT_TRUE ( dSlice2.IsEmpty() );
	// omitted length means "to the end"
	auto dSlice3 = dVec.Slice(1);
	ASSERT_EQ ( dSlice3.GetLength(),2);
	ASSERT_EQ ( dSlice3[0], 2);
	ASSERT_EQ ( dSlice3[1], 3 );
	ASSERT_EQ ( dSlice3.begin(), &dVec[1]);
	auto dSlice4 = dVec.Slice(1,1);
	ASSERT_EQ ( dSlice4.GetLength(), 1);
	ASSERT_EQ ( dSlice4.begin (), &dVec[1] );
}
// conversions between vectors, ByteBlob_t and typed mva views over raw bytes
TEST ( functions, vector2pair_and_pair2mva )
{
	CSphVector<DWORD> dVec;
	dVec.Add ( 1 );
	dVec.Add ( 2 );
	dVec.Add ( 3 );
	ASSERT_EQ ( dVec.GetLength(), 3 );

	// a vector converts to a (ptr, byte-count) blob without copying
	ByteBlob_t dBlob { dVec };
	ASSERT_EQ ( dBlob.first, (const BYTE*)dVec.begin() );
	ASSERT_EQ ( dBlob.second, 12 ) << "3 DWORDS are 12 bytes";

	dVec.Add ( 4 );
	ByteBlob_t dMva {dVec};

	// same storage viewed as int64 values (two DWORDs per value)
	std::pair<int64_t *, int> dTest {dVec};
	ASSERT_EQ ( dTest.first, (const int64_t *) dVec.begin ());
	ASSERT_EQ ( dTest.second, 2 ) << "4 DWORDS are 2 int64s";

	// blob back to typed views: element count scales with the element width
	VecTraits_T<DWORD> dMva32 { dMva };
	ASSERT_EQ ( dMva32.GetLength (), 4 ) << "mva32 from 16 bytes contains 4 values";
	ASSERT_EQ ( dMva32.begin(), dVec.begin() );
	VecTraits_T<int64_t> dMva64 {dMva};
	ASSERT_EQ ( dMva64.GetLength (), 2 ) << "mva64 from 16 bytes contains 2 values";
}
// sphSplit(): multiple delimiters, trailing delimiter, and leading delimiter
// (which yields a leading empty token)
TEST ( functions, sphSplit )
{
	StrVec_t dTokens;
	auto fnSplit = [&dTokens] ( const char * szText, const char * szBounds )
	{
		dTokens.Reset();
		sphSplit ( dTokens, szText, szBounds );
	};

	// every listed delimiter splits; trailing '_' adds no empty token
	fnSplit ( "a:b,c_", ":,_" );
	ASSERT_EQ ( dTokens.GetLength(), 3 );
	ASSERT_STREQ ( dTokens[0].cstr(), "a" );
	ASSERT_STREQ ( dTokens[1].cstr(), "b" );
	ASSERT_STREQ ( dTokens[2].cstr(), "c" );

	// trailing delimiter alone - single token
	fnSplit ( "a:", ":" );
	ASSERT_EQ ( dTokens.GetLength(), 1 );
	ASSERT_STREQ ( dTokens[0].cstr(), "a" );

	// leading delimiter - empty first token is kept
	fnSplit ( ":a", ":" );
	ASSERT_EQ ( dTokens.GetLength(), 2 );
	ASSERT_STREQ ( dTokens[0].cstr(), "" );
	ASSERT_STREQ ( dTokens[1].cstr(), "a" );
}
// as we found g++ (GCC) 4.8.5 20150623 (Red Hat 4.8.5-28) works strange with curly initializer of refs
TEST ( functions, curledref )
{
	CSphString sProof="abc";
	const CSphString &sTest { sProof };		// brace-init of a reference
	const CSphString &sTestc ( sProof );	// paren-init of a reference
	// both forms must bind to sProof itself, not to a temporary copy
	ASSERT_TRUE ( &sProof==&sTestc ) << "curly brackets";
	ASSERT_TRUE ( &sProof==&sTest ) << "figured brackets";
}

// deliberately leak a heap block so leak-detection tooling can be smoke-tested
TEST ( functions, valgrind_use )
{
	BYTE* VARIABLE_IS_NOT_USED pLeak = new BYTE[100];
	ASSERT_TRUE (true) << "intended leak";
}
// MVA_UPSIZE must assemble an int64 from two DWORDs identically to the
// explicit shift/or expression, and (on LE targets) to a plain union overlay
TEST ( functions, int64_le )
{
	// NOTE(review): reading Mva64 after writing pMva is type-punning through a
	// union; accepted by the supported compilers, but formally UB in ISO C++.
	union {
		DWORD pMva[2] = {0x01020304, 0x05060708};
		int64_t Mva64;
	} u;

	// expression from MVA_UPSIZE
	auto iTest = ( int64_t ) ( ( uint64_t ) u.pMva[0] | ( ( ( uint64_t ) u.pMva[1] ) << 32 ) );
	auto iTest2 = MVA_UPSIZE ( u.pMva );
	ASSERT_EQ ( iTest, iTest2 );

#if USE_LITTLE_ENDIAN
	auto iTestLE = u.Mva64;
	ASSERT_EQ ( iTest, iTestLE ) << "little endian allows simplify";
#endif
}
// sphFindLastNumeric() locates the start of the trailing number in a buffer:
// all-digits -> buffer start; no numeric tail -> buffer end
TEST ( functions, FindLastNumeric )
{
	struct { const char * m_szBuf; int m_iOff; } dCases[] =
	{
		{ "12345", 0 },	// all digits - the whole buffer is the number
		{ "1234 ", 5 },	// ends with a non-digit - points past the end
		{ "12 34", 3 },	// points at the trailing "34"
	};
	for ( const auto & tCase : dCases )
		ASSERT_EQ ( tCase.m_szBuf + tCase.m_iOff, sphFindLastNumeric ( tCase.m_szBuf, 5 ) ) << tCase.m_szBuf;
}
/*
 * Different helpers to investigate how copy/move would work
 * Run test functions.trainer to investigate what is finally happens.
 */
// tracer value type: every special member logs itself to stdout
struct train_c
{
	int m_x = 0;
	train_c() { std::cout << "\n-CTR train default 0 " << this; }
	train_c(int x) : m_x (x) { std::cout << "\n-CTR train_c(x) " << m_x << " " << this; }
	train_c(const train_c& c) : m_x(c.m_x) { std::cout << "\n-COPY train ctr "
		<< m_x << " " << this << " from " << c.m_x << " " << &c;}
	train_c(train_c&& c) : m_x(c.m_x) { c.m_x = 0; std::cout << "\n-MOVE train ctr "
		<< m_x << " " << this << " from " << c.m_x << " " << &c;}
	train_c& operator= (const train_c& c) { m_x = c.m_x; std::cout << "\n-COPY train ="
		<< m_x << " " << this << " from " << &c; return *this;}
	train_c& operator= ( train_c&& c ) { m_x = c.m_x; c.m_x = 0; std::cout << "\n-MOVE train ="
		<< m_x << " " << this << " from " << &c; return *this;}
	~train_c() { std::cout << "\n-DTR train " << m_x << " " << this; m_x = 0;}
};

// wrapper holding a train_c; the template c-tr perfect-forwards its argument
struct helper_c
{
	int pad = 0;
	train_c m_h;
	helper_c() { std::cout << "\nHELPER default " << this; }
//	helper_c( train_c c ) : m_h { std::move(c) } {
//		std::cout << "\nHELPER " << this << " from " << &c << " " << &m_h << " " << m_h.m_x; }
	template <typename TRAIN_C>
	helper_c ( TRAIN_C&& c ): m_h { std::forward<TRAIN_C> ( c ) }
	{
		std::cout << "\nHELPER_TT " << this << " from " << &c << " " << &m_h << " " << m_h.m_x;
	}
	~helper_c() { std::cout << "\n~HELPER " << this; }
};

// factory taking a forwarding reference - no extra copy on the way in
template <typename TRAIN_C>
helper_c* make_helper ( TRAIN_C&& c )
{
	std::cout << "\n====> called make_helper with " << &c;
	return new helper_c ( std::forward<TRAIN_C>(c) );
}
// observe copies/moves when passing through the forwarding-ref factory;
// output goes to stdout, nothing is asserted - run and read the trace
TEST ( functions, trainer )
{
	std::cout << "\n\n==> usual pass";
	{
		train_c a (10);
		auto* foo = make_helper (a);	// lvalue: expect a copy into m_h
		std::cout << "\n==> made foo " << foo->m_h.m_x << " a is " << a.m_x;
		delete foo;
	}
	std::cout << "\n\n==> indirect ctr";
	auto fee = make_helper (11);	// int literal: train_c built in place
	std::cout << "\n==> made fee " << fee->m_h.m_x;
	delete fee;
	std::cout << "\n\n==> direct ctr";
	auto bar = make_helper ( train_c (12) );	// rvalue: expect a move into m_h
	std::cout << "\n==> made fee " << bar->m_h.m_x;
	delete bar;
}

// factory taking its argument by value and moving it in (for comparison)
helper_c* make_helper_byval( train_c c )
{
	std::cout << "\n====> called make_helper_byval with " << &c;
	return new helper_c( std::move( c ));
}

// same scenarios as functions.trainer, but through the by-value factory
TEST ( functions, trainer_by_val )
{
	std::cout << "\n\n==> usual pass";
	{
		train_c a( 10 );
		auto* foo = make_helper_byval( a );
		std::cout << "\n==> made foo " << foo->m_h.m_x << " a is " << a.m_x;
		delete foo;
	}
	std::cout << "\n\n==> indirect ctr";
	auto fee = make_helper_byval( 11 );
	std::cout << "\n==> made fee " << fee->m_h.m_x;
	delete fee;
	std::cout << "\n\n==> direct ctr";
	auto bar = make_helper_byval( train_c( 12 ));
	std::cout << "\n==> made fee " << bar->m_h.m_x;
	delete bar;
}
// cross-type Append()/LeakData() between CSphVector, CSphTightVector, LazyVector_T
TEST ( functions, VectorEx )
{
	using namespace sph;
	CSphTightVector<int> dTVec;
	CSphVector<int> dVec;
	dVec.Add ( 1 );
	dVec.Add ( 2 );
	auto &dv = dVec.Add ();
	dv = 3;
	dVec.Add ( 4 );
	dVec.Add ( 5 );
	dVec.Add ( 6 );
	dVec.Add ( 7 );
	dVec.RemoveValue ( 2 );
	dVec.Add ( 8 );
	dVec.Add ( 9 );
	dVec.RemoveValue ( 9 );
	dVec.Add ( 9 );
	dVec.Add ( 10 );
	dVec.RemoveValue ( 10 );
	ASSERT_EQ ( dVec.GetLength (), 8 );
	dTVec.Add(30);
	dTVec.Add(20);
	// append from a tight vector into a plain one
	dVec.Append ( dTVec );
	ASSERT_EQ ( dVec.GetLength (), 10 );
//	dVec.SwapData (dTVec);
	LazyVector_T<int> dLVec;
	dLVec.Add(4);
	dLVec.Add(5);
	ASSERT_EQ ( dLVec.GetLength (), 2 );
	// append from a lazy vector into a tight one
	dTVec.Append (dLVec);
	ASSERT_EQ ( dTVec.GetLength (), 4 );
	// LeakData() releases ownership; the block is intentionally not freed here
	int* VARIABLE_IS_NOT_USED pData = dTVec.LeakData();
}
// compile-and-run check that CSphVector supports all copy/move forms
TEST ( functions, VectorCopyMove )
{
	using vec = CSphVector<int>;
	vec dVec;
	dVec.Add ( 1 );
	dVec.Add ( 2 );
	dVec.Add ( 3 );
	dVec.Add ( 4 );
	dVec.Add ( 5 );
	vec dCopy ( dVec ); // copy c-tr
	vec dCopy2; // default c-tr
	dCopy2 = dVec; // copy c-tr dVec to tmp, then swap dCopy2 with tmp; then d-tr of empty tmp.
	vec dMove ( std::move ( dCopy )); // move c-tr
	vec dMove2; // default ctr
	dMove2 = std::move ( dCopy2 ); // move ctr dCopy2 to tmp, swap dMove2 with tmp; dtr empty tmp.
}

// LazyVector_T is non-copyable; content is transferred via Append() instead
TEST ( functions, LazyVectorCopyMove )
{
	using vec = LazyVector_T<int>;
	vec dVec;
	dVec.Add ( 1 );
	dVec.Add ( 2 );
	dVec.Add ( 3 );
	dVec.Add ( 4 );
	dVec.Add ( 5 );
	// vec dCopy ( dVec ); // will not compile since copy c-tr is deleted
	vec dCopy;
	dCopy.Append(dVec);
}
// Swap and move-assignment of CSphBitvec must carry the stored bits along
TEST ( functions, BitVec_managing )
{
	{
		// swap exchanges both the bits and the capacity
		CSphBitvec foo ( 10 );
		CSphBitvec bar ( 1000 );
		foo.BitSet ( 9 );
		bar.BitSet ( 900 );
		bar.Swap ( foo );
		ASSERT_TRUE ( foo.BitGet ( 900 ) );
		ASSERT_TRUE ( bar.BitGet ( 9 ) );
	}
	{
		// move a small (100-bit) source into a default-constructed bitvec
		CSphBitvec baz;
		CSphBitvec fee ( 100 );
		fee.BitSet ( 90 );
		baz = std::move ( fee );
		ASSERT_TRUE ( baz.BitGet ( 90 ) );
	}
	{
		// same with a much larger source (presumably a different storage
		// path than the 100-bit case - confirm against CSphBitvec internals)
		CSphBitvec baz;
		CSphBitvec fee ( 1000 );
		fee.BitSet ( 90 );
		baz = std::move ( fee );
		ASSERT_TRUE ( baz.BitGet ( 90 ) );
	}
}
// Warner_c collects comma-separated errors and warnings;
// MoveAllTo() drains them into one "ERRORS: ...; WARNINGS: ..." string
TEST ( functions, warner_c )
{
	Warner_c sMsg;
	// output two errors - expect ,-separated
	sMsg.Err("Error 1");
	sMsg.Err("Error 2");
	ASSERT_STREQ ( sMsg.sError(), "Error 1, Error 2");

	// formatted output
	sMsg.Clear();
	sMsg.Err("Error %d", 10);
	ASSERT_STREQ ( sMsg.sError (), "Error 10" );

	// finalized combo output for errors only
	CSphString sFinal;
	sMsg.MoveAllTo (sFinal);
	ASSERT_STREQ ( sFinal.cstr (), "ERRORS: Error 10" );

	// finalized combo output for warnings only
	sMsg.Warn ( "msg 1" );
	sMsg.Warn ( "msg %d", 2 );
	sMsg.MoveAllTo ( sFinal );
	ASSERT_STREQ ( sFinal.cstr (), "WARNINGS: msg 1, msg 2" );

	// output two warnings (same as with error - expected ,-separated)
	sMsg.Warn ( "msg 1" );
	sMsg.Warn ( "msg %d", 2 );
	ASSERT_STREQ ( sMsg.sWarning (), "msg 1, msg 2" );

	// finalized combo output of both errors and warnings
	sMsg.Err ( "Error %d", 10 );
	sMsg.MoveAllTo ( sFinal );
	ASSERT_STREQ ( sFinal.cstr (), "ERRORS: Error 10; WARNINGS: msg 1, msg 2" );
}
// testing our priority queue: with SphLess_T the minimum sits on the root,
// Pop() removes elements in ascending order
TEST ( functions, CSphQueue )
{
	int iMin = 1000;
	CSphQueue<int, SphLess_T<int> > qQ ( 10 );
	for ( auto iVal : { 89, 5, 4, 8, 4, 3, 1, 5, 4, 2 } )
	{
		qQ.Push ( iVal );
		iMin = Min ( iMin, iVal );
		ASSERT_EQ ( qQ.Root (), iMin ) << "min elem always on root";
	}

	// drain completely: roots come out sorted ascending, duplicates preserved
	ASSERT_EQ ( qQ.GetLength (), 10 ); ASSERT_EQ ( qQ.Root(), 1); qQ.Pop();
	ASSERT_EQ ( qQ.GetLength (), 9 ); ASSERT_EQ ( qQ.Root (), 2 ); qQ.Pop();
	ASSERT_EQ ( qQ.GetLength (), 8 ); ASSERT_EQ ( qQ.Root (), 3 ); qQ.Pop();
	ASSERT_EQ ( qQ.GetLength (), 7 ); ASSERT_EQ ( qQ.Root (), 4 ); qQ.Pop();
	ASSERT_EQ ( qQ.GetLength (), 6 ); ASSERT_EQ ( qQ.Root (), 4 ); qQ.Pop();
	ASSERT_EQ ( qQ.GetLength (), 5 ); ASSERT_EQ ( qQ.Root (), 4 ); qQ.Pop();
	ASSERT_EQ ( qQ.GetLength (), 4 ); ASSERT_EQ ( qQ.Root (), 5 ); qQ.Pop();
	ASSERT_EQ ( qQ.GetLength (), 3 ); ASSERT_EQ ( qQ.Root (), 5 ); qQ.Pop();
	ASSERT_EQ ( qQ.GetLength (), 2 ); ASSERT_EQ ( qQ.Root (), 8 ); qQ.Pop();
	ASSERT_EQ ( qQ.GetLength (), 1 ); ASSERT_EQ ( qQ.Root (), 89 ); qQ.Pop();
	ASSERT_EQ ( qQ.GetLength (), 0 );

	// refill after full drain
	qQ.Push(1000);
	ASSERT_EQ ( qQ.Root (), 1000 ) << "pushed 1000 to empty, it is root now";
	qQ.Push ( 100 );
	ASSERT_EQ ( qQ.Root (), 100 ) << "pushed 100 over 1000, it became root now";
}
// GetPathOnly() keeps everything up to (and including) the last slash;
// GetBaseName() keeps everything after it
TEST ( functions, path )
{
	struct PathCase_t { const char * m_szSrc; const char * m_szRes; };

	const PathCase_t dPathCases[] =
	{
		{ "/home/build/test/data/pq2", "/home/build/test/data/" },
		{ "home/pq2", "home/" },
		{ "/pq2", "/" },
		{ "/home/pq2", "/home/" },
		{ "/home/build/", "/home/build/" },	// already a dir - unchanged
		{ "home/build/", "home/build/" },
	};
	for ( const auto & tCase : dPathCases )
	{
		CSphString sSrc ( tCase.m_szSrc );
		CSphString sPath = GetPathOnly ( sSrc );
		ASSERT_STREQ ( sPath.cstr(), tCase.m_szRes ) << tCase.m_szSrc;
	}

	const PathCase_t dBaseCases[] =
	{
		{ "/home/pq2", "pq2" },
		{ "home/pq2", "pq2" },
		{ "pq2", "pq2" },	// no path part at all
		{ "/pq2", "pq2" },
	};
	for ( const auto & tCase : dBaseCases )
	{
		CSphString sSrc ( tCase.m_szSrc );
		CSphString sFile = GetBaseName ( sSrc );
		ASSERT_STREQ ( sFile.cstr(), tCase.m_szRes ) << tCase.m_szSrc;
	}
}
// sanity-check the IS_TRIVIALLY_COPYABLE macro on known trivial/non-trivial types
TEST ( functions, IsTriviallyCopyable )
{
	EXPECT_TRUE ( IS_TRIVIALLY_COPYABLE ( DWORD ) ) << "DWORD";
	EXPECT_TRUE ( IS_TRIVIALLY_COPYABLE ( DWORD[] ) ) << "DWORD[]";
	ASSERT_TRUE ( IS_TRIVIALLY_COPYABLE ( DWORD* ) ) << "DWORD*";
	ASSERT_FALSE ( IS_TRIVIALLY_COPYABLE ( CSphFixedVector<DWORD> ) ) << "CSphFixedVector<DWORD>";
	ASSERT_FALSE ( IS_TRIVIALLY_COPYABLE ( CSphString )) << "CSphString";
}

// shared instrumentation counters for the Raw*Vector tests below
int iCountCtr = 0;
int iCountDtr = 0;

// a type without a default c-tr (reference member) to stress RawVector_T;
// counts every construction/destruction in the globals above
struct NonDefaultCtr_t
{
	int& m_iFoo; // this effectively disables default c-tr
	int m_iNum;

	NonDefaultCtr_t ( int & iFoo, int iNum=0 ) : m_iFoo ( iFoo ), m_iNum ( iNum )
	{
		++iCountCtr;
	}

	~NonDefaultCtr_t ()
	{
		++iCountDtr;
	}
};
// RawVector_T: storage is reserved without constructing elements; elements are
// built explicitly via Emplace_back and destroyed by Shrink
TEST ( functions, RawVector )
{
	iCountCtr = iCountDtr = 0;
	RawVector_T<NonDefaultCtr_t> testv;
	ASSERT_EQ ( iCountCtr, 0 );
	ASSERT_EQ ( iCountDtr, 0 );
	EXPECT_FALSE ( IS_TRIVIALLY_DEFAULT_CONSTRUCTIBLE ( NonDefaultCtr_t )) << "NonDefaultCtr_t";

//	testv.Reserve (100); //< will not compile since Reserve() may reallocate which needs copy ctr
	testv.Reserve_static ( 1000 );
	ASSERT_EQ ( iCountCtr, 0 ) << "nothing constructed";
	ASSERT_EQ ( iCountDtr, 0 ) << "nothing destructed";

	int foo;
	for ( int i=0; i<100; ++i )
		testv.Emplace_back(foo,i);
	ASSERT_EQ ( iCountCtr, 100 );
	ASSERT_EQ ( iCountDtr, 0 ) << "nothing destructed";

//	testv.Resize(500); //< will not compile, since Resize() may imply Reserve

	// Shrink down to 10 elems. It implies that 90 elems will be destructed, and nothing new added
	testv.Shrink ( 10 );
	ASSERT_EQ ( iCountCtr, 100 );
	ASSERT_EQ ( iCountDtr, 90 );
	ASSERT_EQ ( testv.GetLength(), 10);
	for ( auto& elem : testv )
		ASSERT_EQ ( &elem.m_iFoo, &foo );

	// add 10 another elems. It implies that 10 new will be constructed, nothing deleted
	int bar;
	for ( int i = 0; i<10; ++i )
		testv.Emplace_back ( bar, i );
	ASSERT_EQ ( iCountCtr, 110 );
	ASSERT_EQ ( iCountDtr, 90 );

	// ensure that 1-st 10 elems refers to foo, second 10 - to bar
	for ( int i = 0; i<10; ++i )
		ASSERT_EQ ( &testv[i].m_iFoo, &foo );
	for ( int i = 10; i<20; ++i )
		ASSERT_EQ ( &testv[i].m_iFoo, &bar );
}
// counts moves performed by TrivialStructure_t (see RawTrivialVector test)
int iCountMoving = 0;

// default-constructible tracer with copy/move/swap semantics.
// NB: the defaulted copy c-tr does NOT bump iCountCtr; only the value c-tr does.
struct TrivialStructure_t
{
	int m_iNum = -1;

	TrivialStructure_t ( int iNum = -1 ) : m_iNum ( iNum )
	{
		++iCountCtr;
	}

	TrivialStructure_t ( const TrivialStructure_t& ) = default;
	TrivialStructure_t ( TrivialStructure_t && rhs ) noexcept { ++iCountMoving; Swap ( rhs ); }
	TrivialStructure_t & operator= ( TrivialStructure_t rhs ) noexcept { Swap(rhs); return *this; }
	void Swap ( TrivialStructure_t & rhs ) noexcept
	{
		::Swap ( m_iNum, rhs.m_iNum );
	}

	~TrivialStructure_t ()
	{
		++iCountDtr;
	}
};

// vector with raw (no-default-init) storage, but the usual copy policy
template<typename T>
using RawTrivialVector_T = sph::Vector_T<T, sph::DefaultCopy_T<T>, sph::DefaultRelimit, sph::RawStorage_T<T>>;
// RawTrivialVector_T: raw storage, but elements may be copied/moved, so
// Reserve/Resize work; counters track exactly when objects are born and die
TEST ( functions, RawTrivialVector )
{
	EXPECT_FALSE ( IS_TRIVIALLY_DEFAULT_CONSTRUCTIBLE ( TrivialStructure_t )) << "TrivialStructure_t";
	iCountCtr = iCountDtr = 0;
	RawTrivialVector_T<TrivialStructure_t> testv;
	ASSERT_EQ ( iCountCtr, 0 );
	ASSERT_EQ ( iCountDtr, 0 );

	// reserve of empty - relocates nothing
	testv.Reserve ( 100 );
	ASSERT_EQ ( iCountCtr, 0 ) << "nothing constructed";
	ASSERT_EQ ( iCountDtr, 0 ) << "nothing destructed";

	// static reserve of empty - destroys/create nothing
	testv.Reserve_static ( 1000 );
	ASSERT_EQ ( iCountCtr, 0 ) << "nothing constructed";
	ASSERT_EQ ( iCountDtr, 0 ) << "nothing destructed";

	// explicitly construct 100 elems
	for ( int i=0; i<100; ++i )
		testv.Emplace_back(55);
	ASSERT_EQ ( iCountCtr, 100 );
	ASSERT_EQ ( iCountDtr, 0 ) << "nothing destructed";

	// resize to add 100 more elems and expect they just default c-tred
	testv.Resize(200);
	ASSERT_EQ ( iCountMoving, 0 );
	ASSERT_EQ ( iCountCtr, 200 );
	ASSERT_EQ ( iCountDtr, 0 );

	// check that 1-st 100 elems are c-tred, and another are default c-tred
	for ( int i = 0; i<100; ++i )
		ASSERT_EQ ( testv[i].m_iNum, 55 );
	for ( int i = 100; i<200; ++i )
		ASSERT_EQ ( testv[i].m_iNum, -1 );

	// Shrink down to 10 elems. It implies that 190 elems will be destructed, and nothing new added
	testv.Shrink ( 10 );
	ASSERT_EQ ( iCountCtr, 200 );
	ASSERT_EQ ( iCountDtr, 190 );
	ASSERT_EQ ( testv.GetLength(), 10);

	// add 10 another elems. It implies that 10 new will be constructed, nothing deleted
	for ( int i = 0; i<10; ++i )
		testv.Emplace_back ( i );
	ASSERT_EQ ( iCountCtr, 210 );
	ASSERT_EQ ( iCountDtr, 190 );
	ASSERT_EQ ( iCountMoving, 0 );

	/* RemoveFast.
	 * Swap inside implies triple move/destruct:
	 * 	T temp = std::move ( v1 ); // 1-st move
	 *	v1 = std::move ( v2 ); // destruct of v1, then 2-nd move
	 *	v2 = std::move ( temp ); // destruct of v2, then 3-rd move
	 * } // destruct of temp
	 *
	 * Then removing last elem implies 4-th destruct. Totally: 3 moving, 4 destructs
	 * (usual vec in preallocated storage will have 3/3 here since removing doesn't destroy objects.)
	 */
	iCountCtr = iCountDtr = 0;
	testv.RemoveFast(1);
	ASSERT_EQ ( iCountCtr, 0 );
	ASSERT_EQ ( iCountDtr, 4 );
	ASSERT_EQ ( iCountMoving, 3 );
}
// SharedPtr_t: copy-assignment shares ownership; assigning a raw pointer
// rebinds the ptr (releasing the previously shared value)
TEST ( functions, SharedPtr )
{
	SharedPtr_t<int> pFoo;
	ASSERT_FALSE ( bool(pFoo) );
	{
		SharedPtr_t<int> pBar { new int };
		*pBar = 10;
		pFoo = pBar;
		ASSERT_EQ ( *pFoo, 10 );
	}
	// pFoo keeps the value alive after pBar went out of scope
	auto b = new int;
	*b = 20;
	ASSERT_EQ ( *pFoo, 10 );
	pFoo = b;	// rebind to a fresh raw pointer
	ASSERT_EQ ( *pFoo, 20 );
}

// equality on SharedPtr_t compares the managed addresses (incl. vs nullptr)
TEST ( functions, SharedPtrCompare )
{
	SharedPtr_t<int> pFoo;
	ASSERT_EQ ( pFoo, nullptr );
	ASSERT_TRUE ( pFoo == nullptr );

	SharedPtr_t<int> pBar { new int };
	*pBar = 10;
	pFoo = pBar;
	auto pBaz = pFoo;
	ASSERT_EQ ( *pFoo, 10 );
	ASSERT_EQ ( pFoo, pBar );
	ASSERT_EQ ( pFoo, pBaz );
	ASSERT_EQ ( pBaz, pBar );
	ASSERT_TRUE ( pFoo == pBar );
	ASSERT_TRUE ( pFoo == pBaz );
	ASSERT_TRUE ( pBaz == pBar );
}
// debug dump of a DWORD vector marking positions a ('/x') and b ('x\');
// the leading 'return' deliberately silences it - remove to re-enable tracing
void pr (const VecTraits_T<DWORD>& dData, int a=-1, int b=-1)
{
	return;
	for ( auto i=0,len=dData.GetLength(); i<len; ++i)
		if (i==a)
			printf ("/%d, ", dData[i]);
		else if (i==b)
			printf ( "%d\\, ", dData[i] );
		else
			printf ( "%d, ", dData[i] );
	printf ( "(%d/%d)\n", b-a+1, dData.GetLength ());
}
// counts comparisons across partition passes (reported by partition_random)
int iCompared;

// Hoare-style partition (descending order, since cmp is a>b), repeated until
// the split point lands exactly on iNeedElems: afterwards the first iNeedElems
// entries of dData are its largest values. Returns the final pivot so the
// caller can seed the next invocation with it.
int make_partition (DWORD iPivot, int iNeedElems, VecTraits_T<DWORD> dData )
{
//	printf ( "iPivot=%d, need %d, has %d\n", iPivot, iNeedElems, dData.GetLength() );
//	int iPass = 0;
	auto cmp = Lesser ( [] ( int a, int b ) {
		++iCompared;
		return a>b;
	} );
	--iNeedElems;	// switch to 0-based index of the last wanted elem
	int a=0;
	int b=dData.GetLength()-1;
	while (true)
	{
		int i=a;
		int j=b;
//		pr ( dData, i, j );
		// classic two-pointer exchange scan around iPivot
		while (i<=j)
		{
			while (cmp.IsLess (dData[i],iPivot))
				++i;
			while (cmp.IsLess ( iPivot, dData[j]))
				--j;
			if ( i<=j ) {
				::Swap(dData[i],dData[j]);
//				pr ( dData, i, j );
				++i;
				--j;
			}
		}
//		printf ( "i=%d, j=%d, Di=%d, Dj=%d\n", i, j, dData[i], dData[j] );
		if ( iNeedElems == j )
			break;
		if ( iNeedElems < j)
			b = j; // too many elems acquired; continue with left part
		else
			a = i; // too less elems acquired; continue with right part
//		iPivot = dData[(a+b)/2];
		iPivot = dData[( a*3+b ) / 4]; // ( a*(COEF-1)+b)/COEF
//		printf ( "a=%d, b=%d, pivot=%d\n", a,b,iPivot );
//		++iPass;
	}
//	printf ( "partitioning completed in %d passes, %d comparisons, new pivot %d\n", iPass, iCompared, iPivot );
	return iPivot;
}
// partition dData so its top iElems/COEFF values come first. iPivot<0 means
// 'first call' - pick an initial pivot from the data; otherwise reuse the
// pivot returned by the previous call (hence 'lazy').
int lazy_partition ( VecTraits_T<DWORD>& dData,int iPivot, int COEFF )
{
	auto iElems = dData.GetLength();
	int N = iElems/COEFF;
	if ( iPivot<0 )
	{
		// NOTE(review): parses as (N / COEFF) + 1; confirm N / (COEFF+1)
		// was not the intent
		auto iPivotIndex = N / COEFF+1;
		iPivot = dData[iPivotIndex];
//		printf ("1-st pass\n");
	}
	return make_partition (iPivot,N,dData);
}
// verify the partition invariant: no element of the checked tail region
// [N, N*(COEFF-1)) may exceed the minimum of the first N ('top') elements
bool CheckData ( VecTraits_T<DWORD> & dData, int COEFF )
{
	auto iElems = dData.GetLength ();
	auto N = iElems / COEFF;
	DWORD val = 0xFFFFFFFF;
	for ( auto i=0; i<N; ++i)
		val = Min(val,dData[i]);
	for ( auto i=N;i<N * ( COEFF-1 ); ++i)
		if (dData[i]>val)
		{
			// NOTE(review): '%d-f' looks like a typo for '%d-th' in this message
			printf ("%d-f elem %d misplaced (%d)", i, dData[i], val);
			return false;
		}
	return true;
}
// stress lazy_partition: N presorted 'top' values plus a random tail that is
// re-randomized before every pass; the invariant must hold after each call
TEST ( functions, partition_random )
{
	const auto N = 1000;
	const auto COEFF = 4;
	const auto PASSES = 10000;
	const auto LIMIT = 1000000000;
	CSphVector<DWORD> dValues;
	dValues.Reserve ( N*COEFF );
	for ( auto i=0; i<N; ++i)
		dValues.Add(sphRand()% LIMIT);
	dValues.Sort( Lesser ( [] ( int a, int b ) { return a>b; } ));	// descending
	for ( auto i=0; i<N*( COEFF-1); ++i)
		dValues.Add ( sphRand ()% LIMIT);
	pr(dValues,0,N-1);
	iCompared = 0;

	// let's begin
	int iPivot = -1;	// first pass lets lazy_partition choose the pivot
	for ( auto i=0; i<PASSES; ++i)
	{
		// shuffle only the tail; the partitioned head stays as-is
		for (auto j=N;j<N*(COEFF-1);++j)
			dValues[j] = sphRand()% LIMIT;
		iPivot = lazy_partition ( dValues, iPivot, COEFF );
		ASSERT_TRUE ( CheckData ( dValues, COEFF )) << "failed on " << i << " pass.";
	}
//	printf ( "After partitioning\n" );
	pr ( dValues, 0, N-1 );
	printf ("\n avg %f comparisions per pass of %d elems\n", float(iCompared)/float(PASSES), dValues.GetLength());
//	ASSERT_STREQ ( nullptr, "1.100000" );
}
// degenerate input: head all-equal, tail all-equal and strictly greater
TEST ( functions, partition_monoasc )
{
	const auto COEFF = 4;
	CSphVector<DWORD> dValues;
	for (auto i=0; i<100; ++i)
		dValues.Add(1);
	for ( auto i = 0; i<300; ++i )
		dValues.Add ( 2 );
	pr ( dValues, 0, 99 );
	lazy_partition ( dValues, -1, COEFF );
	pr ( dValues, 0, 99 );
	ASSERT_TRUE ( CheckData ( dValues, COEFF ));
}

// degenerate input: head all-equal and strictly greater than the tail
TEST ( functions, partition_monodesc )
{
	const auto COEFF = 4;
	CSphVector<DWORD> dValues;
	for ( auto i = 0; i<100; ++i )
		dValues.Add ( 2 );
	for ( auto i = 0; i<300; ++i )
		dValues.Add ( 1 );
	pr ( dValues, 0, 99 );
	lazy_partition ( dValues, -1, COEFF );
	pr ( dValues, 0, 99 );
	ASSERT_TRUE ( CheckData ( dValues, COEFF ));
}

// strictly ascending input
TEST ( functions, partition_ascending )
{
	const auto COEFF = 4;
	CSphVector<DWORD> dValues;
	for ( auto i = 0; i<400; ++i )
		dValues.Add ( i );
	pr ( dValues, 0, 99 );
	lazy_partition ( dValues, -1, COEFF );
	pr ( dValues, 0, 99 );
	ASSERT_TRUE ( CheckData ( dValues, COEFF ));
}

// strictly descending input
TEST ( functions, partition_descending )
{
	const auto COEFF = 4;
	CSphVector<DWORD> dValues;
	for ( auto i = 0; i<400; ++i )
		dValues.Add ( 1000-i );
	pr ( dValues, 0, 99 );
	lazy_partition ( dValues, -1, COEFF );
	pr ( dValues, 0, 99 );
	ASSERT_TRUE ( CheckData ( dValues, COEFF ));
}
// Return the histogram dump with its first line (the header) stripped off.
static CSphString GetHist ( const Histogram_i * pHist )
{
	StringBuilder_c tOut;
	pHist->Dump ( tOut );
	const char * pFull = tOut.cstr();
	const char * sDel = strchr ( pFull, '\n' );
	CSphString sDump;
	if ( !sDel ) // no header line at all - return the dump as-is
	{
		sDump = pFull;
		return sDump;
	}
	// bytes remaining right after the '\n'. The previous '+1' here overshot
	// by two and copied past the terminating zero (out-of-bounds read);
	// string compares still passed only because the copy kept the '\0'.
	int iLen = tOut.GetLength() - int ( sDel - pFull ) - 1;
	sDump.SetBinary ( sDel+1, iLen );
	return sDump;
}
// ascending sample values fed into the histograms under test
static const float g_dHistSrc[] = {0.0f,41.0f,50.0f,54.0f,60.0f,61.0f,63.0f,64.0f,65.0f,67.0f,68.0f,69.0f,71.0f,72.0f,73.0f,74.0f,75.0f,76.0f,77.0f,78.0f,79.0f,80.0f,81.0f,
	83.0f,84.0f,96.0f,107.0f,143.0f,147.0f,148.0f,149.0f,150.0f,151.0f,152.0f,153.0f,154.0f,155.0f,156.0f,157.0f,158.0f,159.0f,160.0f,162.0f,165.0f,
	166.0f,167.0f,168.0f,169.0f,170.0f,171.0f,175.0f,178.0f,180.0f,181.0f,182.0f,183.0f,184.0f,185.0f,186.0f,188.0f,189.0f,190.0f,192.0f,193.0f,
	195.0f,197.0f,198.0f};

// one histogram scenario: feed g_dHistSrc m_iLoop times into a histogram of
// m_iSize buckets and compare its dump against m_sRef
struct HistCase_t
{
	int m_iLoop = 0;
	int m_iSize = 0;
	const char * m_sRef = nullptr;
};
static std::unique_ptr<Histogram_i> PopulateHist ( const HistCase_t & tCase, bool bFinalize = true )
{
std::unique_ptr<Histogram_i> pHist = CreateHistogram ( "dyn", SPH_ATTR_FLOAT, tCase.m_iSize );
for ( int i=0; i<tCase.m_iLoop; i++ )
{
for ( float fVal : g_dHistSrc )
{
SphAttr_t tVal = sphF2DW ( fVal );
pHist->Insert ( tVal );
}
}
if ( bFinalize )
pHist->Finalize();
return pHist;
}
// histogram bucketing: each case checks the full dump for a given bucket
// count / repeat count; then EstimateRsetSize is probed on a range filter
TEST ( functions, histogram )
{
	HistCase_t dCases[] = {
		{1, 35, R"(values:35
0.000,1;41.000,1;50.000,1;54.000,1;60.500,2;64.000,3;68.000,3;72.500,4;75.500,2;77.500,2;80.000,3;83.500,2;96.000,1;107.000,1;143.000,1;147.500,2;149.500,2;151.500,2;153.500,2;155.500,2;157.500,2;159.500,2;162.000,1;165.500,2;167.500,2;170.000,3;175.000,1;178.000,1;180.500,2;182.500,2;185.000,3;189.000,3;192.500,2;195.000,1;197.500,2)"},
		{2, 35, R"(values:35
0.000,2;41.000,2;50.000,2;54.000,2;60.500,4;64.000,6;68.000,6;72.286,7;75.200,5;77.500,4;80.000,6;83.500,4;96.000,2;107.000,2;143.000,2;147.500,4;149.500,4;151.500,4;153.500,4;155.500,4;157.500,4;159.500,4;162.000,2;165.500,4;167.500,4;170.000,6;175.000,2;178.000,2;180.500,4;182.500,4;185.000,6;189.000,6;192.500,4;195.000,2;197.500,4)"},
		{1, 65, R"(values:65
0.000,1;41.000,1;50.000,1;54.000,1;60.500,2;63.500,2;65.000,1;67.000,1;68.000,1;69.000,1;71.000,1;72.000,1;73.000,1;74.000,1;75.000,1;76.000,1;77.000,1;78.000,1;79.000,1;80.000,1;81.000,1;83.000,1;84.000,1;96.000,1;107.000,1;143.000,1;147.000,1;148.000,1;149.000,1;150.000,1;151.000,1;152.000,1;153.000,1;154.000,1;155.000,1;156.000,1;157.000,1;158.000,1;159.000,1;160.000,1;162.000,1;165.000,1;166.000,1;167.000,1;168.000,1;169.000,1;170.000,1;171.000,1;175.000,1;178.000,1;180.000,1;181.000,1;182.000,1;183.000,1;184.000,1;185.000,1;186.000,1;188.000,1;189.000,1;190.000,1;192.000,1;193.000,1;195.000,1;197.000,1;198.000,1)"},
		{1, 70, R"(values:67
0.000,1;41.000,1;50.000,1;54.000,1;60.000,1;61.000,1;63.000,1;64.000,1;65.000,1;67.000,1;68.000,1;69.000,1;71.000,1;72.000,1;73.000,1;74.000,1;75.000,1;76.000,1;77.000,1;78.000,1;79.000,1;80.000,1;81.000,1;83.000,1;84.000,1;96.000,1;107.000,1;143.000,1;147.000,1;148.000,1;149.000,1;150.000,1;151.000,1;152.000,1;153.000,1;154.000,1;155.000,1;156.000,1;157.000,1;158.000,1;159.000,1;160.000,1;162.000,1;165.000,1;166.000,1;167.000,1;168.000,1;169.000,1;170.000,1;171.000,1;175.000,1;178.000,1;180.000,1;181.000,1;182.000,1;183.000,1;184.000,1;185.000,1;186.000,1;188.000,1;189.000,1;190.000,1;192.000,1;193.000,1;195.000,1;197.000,1;198.000,1)"},
		{20, 15, R"(values:15
0.000,20;41.000,20;52.000,40;62.600,100;70.587,138;79.115,182;96.000,20;107.000,20;143.000,20;150.126,143;157.656,157;168.000,140;176.585,41;183.057,140;192.780,159)"}
	};

	for ( const HistCase_t & tCase : dCases )
	{
		std::unique_ptr<Histogram_i> pHist = PopulateHist ( tCase );
		ASSERT_STREQ( GetHist ( pHist.get() ).cstr(), tCase.m_sRef );
	}

	// estimate of merged values
	{
		HistCase_t tCase;
		tCase.m_iLoop = 1;
		tCase.m_iSize = 10;
		std::unique_ptr<Histogram_i> pHist = PopulateHist ( tCase, false );
		// pile 20 extra samples of 10.0 on top before finalizing
		for ( int i=0; i<20; i++)
			pHist->Insert ( sphF2DW ( 10.0f ) );
		pHist->Finalize();

		CSphFilterSettings tFilter;
		tFilter.m_eType = SPH_FILTER_FLOATRANGE;
		tFilter.m_fMinValue = 0.0f;
		tFilter.m_fMaxValue = 10.0f;

		HistogramRset_t tRes;
		pHist->EstimateRsetSize ( tFilter, tRes );
		ASSERT_EQ( tRes.m_iTotal, 20 );
	}
}
// FieldMask_t: DeleteBit shifts all higher bits down by one
TEST ( functions, field_mask )
{
	FieldMask_t foo;
	foo.Assign32(0x55555555);
	ASSERT_TRUE ( foo.Test ( 6 ) );
	ASSERT_TRUE ( foo.Test ( 30 ) );
	// deleting bit 15 shifts the upper half right by one
	foo.DeleteBit (15);
	ASSERT_EQ (foo.GetMask32(),0x2AAAD555);

	// test that removing a high bit works
	foo.Set (250);
	foo.DeleteBit(249);
	ASSERT_TRUE ( foo.Test (249));

	// test that removing DWORD-edge bit carries edge correct
	foo.Set(224);
	foo.DeleteBit(223);
	ASSERT_TRUE ( foo.Test ( 223 ) );
}
// minimal class to compare member vs static member function pointers
class foo
{
public:
	void bar() {};
	static void bar_static() {}
};

// a pointer to a static member function is a plain function pointer,
// not a member-function pointer
TEST ( functions, static_trait )
{
	ASSERT_TRUE ( std::is_member_function_pointer<decltype ( &foo::bar )>::value );
	ASSERT_FALSE ( std::is_member_function_pointer<decltype ( &foo::bar_static )>::value );
}
// refcounted lazy vector used to model copy-on-write style mutation below
template<typename T>
class RefCountedTestVec_T final : public ISphRefcountedMT, public LazyVector_T<T>
{
protected:
	~RefCountedTestVec_T () final { }
public:
	RefCountedTestVec_T () = default;
};

template <typename T>
using DataVecRefPtr_t = CSphRefcountedPtr<RefCountedTestVec_T<T> >;
template <typename T>
using ConstDataVecRefPtr_t = CSphRefcountedPtr<const RefCountedTestVec_T<T> >;

// RAII mutator: copies the owner's data on construction, lets the caller edit
// the private copy, and publishes it back to the owner in the d-tor
struct DataVecMutable_c : ISphNoncopyable
{
public:
	ConstDataVecRefPtr_t<int> & m_tOwner;	// where to publish on destruction
	DataVecRefPtr_t<int> m_tNewData;		// the writable private copy

	// shortcuts
	RefCountedTestVec_T<int> & m_dData;

	explicit DataVecMutable_c ( ConstDataVecRefPtr_t<int>& tData )
		: m_tOwner { tData }
		, m_tNewData { new RefCountedTestVec_T<int> () }
		, m_dData { *m_tNewData }
	{
		// snapshot the current owner content into the copy
		m_dData.Reserve ( tData->GetLength() );
		for ( const auto & i : *tData )
			m_dData.Add(i);
	}

	~DataVecMutable_c ()
	{
		// Leak() hands the reference over - no extra addref/release pair
		m_tOwner = m_tNewData.Leak();
	}
};
// copy-on-write scenario: a reader holding the old snapshot stays unchanged
// while DataVecMutable_c swaps new data into the shared ref on destruction
TEST ( functions, mutate_via_ref )
{
	// original immutable data
	DataVecRefPtr_t<int> origData { new RefCountedTestVec_T<int> };
	origData->Add ( 1 );
	origData->Add ( 2 );
	origData->Add ( 3 );
	ASSERT_EQ ( origData->GetRefcount (), 1 );

	// make const snapshot of orig data
	ConstDataVecRefPtr_t<int> refData;
	refData = origData.Leak();
	ASSERT_EQ ( refData->GetLength (), 3 );
	ASSERT_EQ ( ( *refData )[0], 1 );
	ASSERT_EQ ( ( *refData )[1], 2 );
	ASSERT_EQ ( ( *refData )[2], 3 );
	ASSERT_EQ ( refData->GetRefcount(), 1 );
	ASSERT_EQ ( refData->GetRefcount (), 1 );

	// second reader bumps the refcount
	auto prevRefData = refData;
	ASSERT_EQ ( refData->GetRefcount (), 2 );

	// make mutable snapshot
	{
		DataVecMutable_c foo { refData };
		ASSERT_EQ ( refData->GetRefcount (), 2 );
		ASSERT_EQ ( foo.m_tNewData->GetRefcount (), 1 );
		ASSERT_EQ ( foo.m_tNewData->GetLength (), 3 );
		ASSERT_EQ ( foo.m_dData.GetLength (), 3 );
		ASSERT_EQ ( &foo.m_dData, foo.m_tNewData );

		// mutate mutable data
		foo.m_dData.Resize(4);
		foo.m_dData[0] = 10;
		foo.m_dData[3] = 42;

		// check that orig (ref) is not changed
		ASSERT_EQ ( refData->GetLength (), 3 );
		ASSERT_EQ ( ( *refData )[0], 1 );
		ASSERT_EQ ( ( *refData )[1], 2 );
		ASSERT_EQ ( ( *refData )[2], 3 );

		// check that mutation is changed
		ASSERT_EQ ( foo.m_tNewData->GetLength (), 4 );
		ASSERT_EQ ( foo.m_dData.GetLength (), 4 );
		ASSERT_EQ ( foo.m_dData[0], 10 );
		ASSERT_EQ ( foo.m_dData[1], 2 );
		ASSERT_EQ ( foo.m_dData[2], 3 );
		ASSERT_EQ ( foo.m_dData[3], 42 );
	}	// <- mutator d-tor publishes the new data into refData here

	// check that prev data is still unchanged (i.e. 'another reader')
	ASSERT_EQ ( prevRefData->GetLength (), 3 );
	ASSERT_EQ ( ( *prevRefData )[0], 1 );
	ASSERT_EQ ( ( *prevRefData )[1], 2 );
	ASSERT_EQ ( ( *prevRefData )[2], 3 );
	ASSERT_EQ ( prevRefData->GetRefcount (), 1 );

	// check that ref data is now pointee to new values provided by mutator
	ASSERT_EQ ( refData->GetLength (), 4 );
	ASSERT_EQ ( ( *refData )[0], 10 );
	ASSERT_EQ ( ( *refData )[1], 2 );
	ASSERT_EQ ( ( *refData )[2], 3 );
	ASSERT_EQ ( ( *refData )[3], 42 );
	ASSERT_EQ ( refData->GetRefcount (), 1 );
}
| 64,653
|
C++
|
.cpp
| 1,885
| 32.060477
| 646
| 0.63177
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,996
|
gtests_searchdaemon.cpp
|
manticoresoftware_manticoresearch/src/gtests/gtests_searchdaemon.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include <gtest/gtest.h>
#include "sphinxint.h"
#include "searchdaemon.h"
#include "searchdha.h"
#include "searchdreplication.h"
// QueryStatElement_t uses default ctr with inline initializer;
// this test is just to be sure it works correctly
TEST ( functions, QueryStatElement_t_ctr )
{
	using namespace QueryStats;
	QueryStatElement_t tElem;
	// m_dData must cover every TYPE_* slot
	ASSERT_EQ ( sizeof(tElem.m_dData), TYPE_TOTAL*sizeof(tElem.m_dData[0]));
	ASSERT_EQ ( tElem.m_uTotalQueries, 0);
	// MIN starts at the maximum so the first sample always wins
	ASSERT_EQ ( tElem.m_dData[TYPE_AVG], 0 );
	ASSERT_EQ ( tElem.m_dData[TYPE_MIN], UINT64_MAX );
	ASSERT_EQ ( tElem.m_dData[TYPE_MAX], 0 );
	ASSERT_EQ ( tElem.m_dData[TYPE_95], 0 );
	ASSERT_EQ ( tElem.m_dData[TYPE_99], 0 );
}
// Test logger: captures daemon log lines into a static buffer (or mirrors
// them to stderr) so fixtures can assert on emitted warnings.
class tstlogger
{
	// test helper log - logs into sLogBuff.
	static void TestLogger ( ESphLogLevel eLevel, const char * sFmt, va_list ap )
	{
		if ( eLevel>m_eMaxLevel )
			return;

		if ( m_bOutToStderr )
			vfprintf ( stderr, sFmt, ap);
		else
		{
			// map the level to the tag the assertions expect
			const char * lvl = "";
			switch (eLevel) {
			case SPH_LOG_FATAL:
				lvl = "FATAL: ";
				break;
			case SPH_LOG_WARNING:
			case SPH_LOG_INFO:
				lvl = "WARNING: ";
				break;
			case SPH_LOG_DEBUG:
			case SPH_LOG_RPL_DEBUG:
			case SPH_LOG_VERBOSE_DEBUG:
			case SPH_LOG_VERY_VERBOSE_DEBUG:
			default:
				lvl = "DEBUG: ";
				break;
			}
			// derive the remaining room from the actual buffer size and the
			// actual prefix length; the former hard-coded 1023/1013 limits
			// silently went out of sync with sizeof(sLogBuff)
			char * pOut = sLogBuff;
			int iPrefix = snprintf ( pOut, sizeof ( sLogBuff ), "%s", lvl );
			if ( iPrefix<0 || iPrefix>=(int) sizeof ( sLogBuff ) )
				return;	// prefix alone filled the buffer; nothing more fits
			vsnprintf ( pOut+iPrefix, sizeof ( sLogBuff )-iPrefix, sFmt, ap );
		}
	}

protected:
	// install TestLogger and reset the capture buffer; called from SetUp()
	void setup ()
	{
		g_pLogger() = TestLogger;
		sLogBuff[0] = '\0';
	}

	static char sLogBuff[1024];
	static bool m_bOutToStderr;
	static ESphLogLevel m_eMaxLevel;

public:
	// switch logging to stderr pass-through at debug verbosity
	static void SetStderrLogger()
	{
		m_eMaxLevel = SPH_LOG_DEBUG;
		m_bOutToStderr = true;
		g_pLogger () = TestLogger;
	}
};

char tstlogger::sLogBuff[1024];
bool tstlogger::m_bOutToStderr = false;
ESphLogLevel tstlogger::m_eMaxLevel = SPH_LOG_INFO;
// fixture that captures daemon log output into tstlogger::sLogBuff
class CustomLogger_c: protected tstlogger, public ::testing::Test
{
protected:
	void SetUp() override
	{
		tstlogger::setup();
	}
};

// same fixture, but mirrors log output to stderr (useful for death tests,
// where the child process output is what gets matched)
class DeathLogger_c: protected tstlogger, public ::testing::Test
{
protected:
	void SetUp() override
	{
		m_bOutToStderr = true;
		tstlogger::setup();
	}
};

// free-function hook: route daemon logging to stderr at debug verbosity
void SetStderrLogger()
{
	tstlogger::SetStderrLogger ();
}
// check how ParseAddressPort holds different cases
class T_ParseAddressPort : public CustomLogger_c
{
protected:
	// drive one ParseAddressPort() call and verify result, consumed input,
	// emitted warning, and (on success) the parsed agent fields
	void ParserTest ( const char * sInExpr, // incoming line
		bool bExpectedResult,				// expect parsed or not
		const char *sExpectedTail,			// tail of incoming line ret to caller
		int iParsedFamily,					// expected inet family
		const char * sParsedAddress,		// expected resulting parsed address/path
		int iParsedPort = -1,				// expected resulting parsed port
		const char * sWarningMessage = ""	// expected warning message, if any
	)
	{
		CSphString sError;
		WarnInfo_c tInfo {"tstidx", "tstagent", sError };
		const char * pTest = sInExpr;
		AgentDesc_t tFoo;
		bool bResult = ParseAddressPort ( tFoo, &pTest, tInfo );
		EXPECT_EQ ( bResult, bExpectedResult ) << sInExpr;
		EXPECT_STREQ ( sExpectedTail, pTest );
		EXPECT_STREQ ( sWarningMessage, sLogBuff );

		// the rest have no sense to check at all if parsing failed
		if ( !bResult )
			return;

		ASSERT_EQ ( iParsedFamily, tFoo.m_iFamily );
		ASSERT_STREQ ( sParsedAddress, tFoo.m_sAddr.cstr () );
		ASSERT_EQ ( iParsedPort, tFoo.m_iPort );
	}
};
// simple IP, no port, default port - success
TEST_F ( T_ParseAddressPort, simple_ip_no_port )
{
	ParserTest ( "127.0.0.1:tail", true, ":tail",
		AF_INET, "127.0.0.1", IANA_PORT_SPHINXAPI,
		"WARNING: table 'tstidx': agent 'tstagent': portnum expected before 'tail' - Using default IANA 9312 port"
	);
}

// wrong IP, no port, default port - success (we don't resolve addresses here)
TEST_F ( T_ParseAddressPort, wrong_ip_no_port)
{
	ParserTest ( "257.0.0.1|tail", true, "|tail",
		AF_INET, "257.0.0.1", IANA_PORT_SPHINXAPI,
		"WARNING: table 'tstidx': agent 'tstagent': colon and portnum expected before '|tail' - Using default IANA 9312 port"
	);
}

// any host, explicit port - success
TEST_F ( T_ParseAddressPort, any_host_with_port )
{
	ParserTest ( "my_server:9654:tail", true, ":tail",
		AF_INET, "my_server", 9654);
}

// any host, out-of-range port - fail
TEST_F ( T_ParseAddressPort, any_host_wrong_port )
{
	ParserTest ( "my_server:96540", false, "",
		AF_INET, "my_server", IANA_PORT_SPHINXAPI,
		"WARNING: table 'tstidx': agent 'tstagent': invalid port number near '', - SKIPPING AGENT"
	);
}

// double host, no port, default port: - success, return first host
TEST_F ( T_ParseAddressPort, double_host_no_port )
{
	ParserTest ( "my_server:my_server2", true, ":my_server2",
		AF_INET, "my_server", IANA_PORT_SPHINXAPI,
		"WARNING: table 'tstidx': agent 'tstagent': portnum expected before 'my_server2' - Using default IANA 9312 port"
	);
}

// host, port, another host - success, return first host
TEST_F ( T_ParseAddressPort, host_port_host2 )
{
	ParserTest ("my_server:1000:my_server2", true, ":my_server2",
		AF_INET, "my_server", 1000);
}

// unix host, port, another host - success, leave all the rest after the host
TEST_F ( T_ParseAddressPort, unixhost_port )
{
	ParserTest ( "/my_server:1000:my_server2", true, ":1000:my_server2",
		AF_UNIX, "/my_server");
}
// fixture for ConfigureMultiAgent(): parses agent definition strings and
// checks both the parse verdict and the warning text captured by tstlogger
class T_ConfigureMultiAgent : protected tstlogger, public ::testing::Test
{
protected:
	void SetUp () final
	{
		tstlogger::setup ();
	}

	// defaults: non-blackhole, non-persistent, random HA, retry count 3
	AgentOptions_t tAgentOptions { false, false, HA_RANDOM, 3, 0 };
	const char * szIndexName = "tstidx";

	// parse sInExpr and return the resulting agent (nullptr on parse failure)
	MultiAgentDescRefPtr_c ParserTestSimple ( const char * sInExpr, bool bExpectedResult )
	{
		g_bHostnameLookup = true;
		const char * pTest = sInExpr;
		CSphString sError;
		auto pResult = ConfigureMultiAgent ( pTest, "tstidx", tAgentOptions, sError );
		EXPECT_EQ ( pResult!=nullptr, bExpectedResult ) << sInExpr;
		return pResult;
	}

	// parse sInExpr and additionally verify the emitted warning message
	void ParserTest ( const char * sInExpr // incoming line
		, bool bExpectedResult // expect parsed or not
		, const char * sWarningMessage = "" // expected warning message, if any
		)
	{
		MultiAgentDescRefPtr_c pAgent ( ParserTestSimple ( sInExpr, bExpectedResult ) );
		EXPECT_STREQ ( sWarningMessage, sLogBuff );
	}
};
// negative: options without any host
TEST_F ( T_ConfigureMultiAgent, wrong_only_options )
{
	ParserTest ( "[only=options]", false,
		"WARNING: table 'tstidx': agent '[only=options]': "
			"one or more hosts/sockets expected before [, - SKIPPING AGENT" );
}

// negative: trailing garbage after the []-enclosed options
TEST_F ( T_ConfigureMultiAgent, wrong_syntax )
{
	ParserTest ( "bla|ble|bli:idx[options]haha", false,
		"WARNING: table 'tstidx': agent 'bla|ble|bli:idx[options]haha': "
			"wrong syntax: expected one or more hosts/sockets, then m.b. []-enclosed options, - SKIPPING AGENT");
}

// negative: unknown option name
TEST_F ( T_ConfigureMultiAgent, wrong_unknown_option )
{
	ParserTest ( "bla|ble|bli:idx[conn =pconn, ha_strategy=unknown]", false,
		"WARNING: table 'tstidx': agent 'bla|ble|bli:idx[conn =pconn, ha_strategy=unknown]': "
			"unknown agent option ' ha_strategy=unknown', - SKIPPING AGENT" );
}

// negative: option without an '=value' part
TEST_F ( T_ConfigureMultiAgent, wrong_formatted_option )
{
	ParserTest ( "bla|ble|bli[options]", false,
		"WARNING: table 'tstidx': agent 'bla|ble|bli[options]': "
			"option options error: option and value must be =-separated pair, - SKIPPING AGENT");
}

// fixme: m.b. parse 1000abc as index name in the case?
TEST_F ( T_ConfigureMultiAgent, fixme_wrong_agent_port )
{
	ParserTest ( "localhost:1000abc", false, "WARNING: table 'tstidx': agent 'localhost:1000abc': "
		"after host/socket expected ':', then table(s), but got 'abc'), - SKIPPING AGENT" );
}

// fixme: m.b. parse 100000abc as index name in the case?
TEST_F ( T_ConfigureMultiAgent, fixme_wrong_portnum )
{
	ParserTest ( "localhost:100000abc", false, "WARNING: table 'tstidx': agent 'localhost:100000abc': "
		"invalid port number near 'abc', - SKIPPING AGENT" );
}

// negative: '=' is not allowed inside a table name
TEST_F ( T_ConfigureMultiAgent, wrong_idx_name )
{
	ParserTest ( "/localhost:idx,idx=name,idx2", false,
		"WARNING: table 'tstidx': agent '/localhost:idx,idx=name,idx2': "
			"no such table: idx=name, - SKIPPING AGENT" );
}

// negative: space between port and the table list
TEST_F ( T_ConfigureMultiAgent, wrong_idx_delimited_from_host )
{
	ParserTest ( "localhost:1000 idx", false, "WARNING: table 'tstidx': agent 'localhost:1000 idx': "
		"after host/socket expected ':', then table(s), but got ' idx'), - SKIPPING AGENT" );
}

// negative: empty agent definition
TEST_F ( T_ConfigureMultiAgent, wrong_empty_idx )
{
	ParserTest ( "", false, "WARNING: table 'tstidx': agent '': "
		"empty agent definition, - SKIPPING AGENT" );
}

// positive: two mirrors, each with explicit port and table list
TEST_F ( T_ConfigureMultiAgent, agent_couple_mirrors )
{
	ParserTest ( "localhost:1000:idx|localhost:1003:idx1,idx2,idx3", true );
}

// positive: plain host:port:table
TEST_F ( T_ConfigureMultiAgent, agent_host_port_index )
{
	ParserTest ( "localhost:1000:idx", true );
}

// positive: no port (default used, with a warning) plus an option
TEST_F ( T_ConfigureMultiAgent, agent_ok_only_one_index_and_options )
{
	ParserTest ( "bla|ble|bli:idx[conn=pconn]", true, "WARNING: table 'tstidx': agent 'bla|ble|bli:idx[conn=pconn]': "
		"portnum expected before 'idx' - Using default IANA 9312 port" );
}

// positive: options with spaces around '=' are accepted
TEST_F ( T_ConfigureMultiAgent, agent_ok_options_space_sparsed )
{
	ParserTest ( "bla|ble|bli:idx[conn =pconn, blackhole = 1]", true
		, "WARNING: table 'tstidx': agent 'bla|ble|bli:idx[conn =pconn, blackhole = 1]': "
			"portnum expected before 'idx' - Using default IANA 9312 port" );
}
// positive: full definition - IP, host:port and unix-socket mirrors plus options
TEST_F ( T_ConfigureMultiAgent, fully_configured_3_mirrors )
{
	MultiAgentDescRefPtr_c pAgent (
		ParserTestSimple ( "127.0.0.1|bla:6000:idx|/path[blackhole=1,retry_count=4,conn=pconn]", true ) );

	auto &tAgent = *pAgent;
	ASSERT_EQ ( tAgent.GetLength (), 3);
	ASSERT_EQ ( tAgent.GetRetryLimit(), 4);
	ASSERT_TRUE ( tAgent.IsHA() );

	// mirror 1: plain IP, default port; 16777343 is 127.0.0.1 in host-int form
	auto &tFirst = tAgent[0];
	ASSERT_STREQ ( tFirst.m_sIndexes.cstr (), "idx" );
	ASSERT_STREQ ( tFirst.m_sAddr.cstr (), "127.0.0.1" );
	ASSERT_EQ ( tFirst.m_iFamily, AF_INET );
	ASSERT_FALSE ( tFirst.m_bNeedResolve ) << "since plain IP provided, no more resolving necessary";
	ASSERT_EQ ( tFirst.m_uAddr, 16777343 );
	ASSERT_EQ ( tFirst.m_iPort, IANA_PORT_SPHINXAPI );
	ASSERT_TRUE ( tFirst.m_bBlackhole );
	ASSERT_TRUE ( tFirst.m_bPersistent );

	// mirror 2: named host with explicit port, needs resolving
	auto &tSecond = tAgent[1];
	ASSERT_STREQ ( tSecond.m_sIndexes.cstr (), "idx" );
	ASSERT_STREQ ( tSecond.m_sAddr.cstr (), "bla" );
	ASSERT_EQ ( tSecond.m_iFamily, AF_INET );
	ASSERT_TRUE ( tSecond.m_bNeedResolve );
	ASSERT_EQ ( tSecond.m_iPort, 6000 );
	ASSERT_TRUE ( tSecond.m_bBlackhole );
	ASSERT_TRUE ( tSecond.m_bPersistent );

	// mirror 3: unix socket; no table given, so the local table name is used
	auto &tThird = tAgent[2];
	ASSERT_STREQ ( tThird.m_sIndexes.cstr (), "tstidx" );
	ASSERT_STREQ ( tThird.m_sAddr.cstr (), "/path" );
	ASSERT_EQ ( tThird.m_iFamily, AF_UNIX );
	ASSERT_TRUE ( tThird.m_bBlackhole );
	ASSERT_TRUE ( tThird.m_bPersistent );
}

// positive: a bare hostname gets all the defaults
TEST_F ( T_ConfigureMultiAgent, simple_host )
{
	MultiAgentDescRefPtr_c pAgent ( ParserTestSimple ( "bla", true ));
	auto &tAgent = *pAgent;
	ASSERT_EQ ( tAgent.GetLength (), 1 );
	ASSERT_EQ ( tAgent.GetRetryLimit (), 3 );
	ASSERT_FALSE ( tAgent.IsHA () );

	auto & tMirror = tAgent[0];
	ASSERT_STREQ ( tMirror.m_sIndexes.cstr (), "tstidx" );
	ASSERT_STREQ ( tMirror.m_sAddr.cstr (), "bla" );
	ASSERT_EQ ( tMirror.m_iFamily, AF_INET );
	ASSERT_TRUE ( tMirror.m_bNeedResolve );
	ASSERT_EQ ( tMirror.m_iPort, IANA_PORT_SPHINXAPI );
	ASSERT_FALSE ( tMirror.m_bBlackhole );
	ASSERT_FALSE ( tMirror.m_bPersistent );
}

// positive: 3 mirrors, no options
// NOTE(review): retry limit 9 presumably = 3 mirrors x default retry count 3 - confirm
TEST_F ( T_ConfigureMultiAgent, simple_3_hosts )
{
	MultiAgentDescRefPtr_c pAgent ( ParserTestSimple ( "127.0.0.1|bla|/path", true ) );
	auto &tAgent = *pAgent;
	ASSERT_EQ ( tAgent.GetLength (), 3 );
	ASSERT_EQ ( tAgent.GetRetryLimit (), 9 );
	ASSERT_TRUE ( tAgent.IsHA () );

	auto &tFirst = tAgent[0];
	ASSERT_STREQ ( tFirst.m_sIndexes.cstr (), "tstidx" );
	ASSERT_STREQ ( tFirst.m_sAddr.cstr (), "127.0.0.1" );
	ASSERT_EQ ( tFirst.m_iFamily, AF_INET );
	ASSERT_FALSE ( tFirst.m_bNeedResolve ) << "since plain IP provided, no more resolving necessary";
	ASSERT_EQ ( tFirst.m_uAddr, 16777343 );
	ASSERT_EQ ( tFirst.m_iPort, IANA_PORT_SPHINXAPI );
	ASSERT_FALSE ( tFirst.m_bBlackhole );
	ASSERT_FALSE ( tFirst.m_bPersistent );

	auto &tSecond = tAgent[1];
	ASSERT_STREQ ( tSecond.m_sIndexes.cstr (), "tstidx" );
	ASSERT_STREQ ( tSecond.m_sAddr.cstr (), "bla" );
	ASSERT_EQ ( tSecond.m_iFamily, AF_INET );
	ASSERT_TRUE ( tSecond.m_bNeedResolve );
	ASSERT_EQ ( tSecond.m_iPort, IANA_PORT_SPHINXAPI );
	ASSERT_FALSE ( tSecond.m_bBlackhole );
	ASSERT_FALSE ( tSecond.m_bPersistent );

	auto &tThird = tAgent[2];
	ASSERT_STREQ ( tThird.m_sIndexes.cstr (), "tstidx" );
	ASSERT_STREQ ( tThird.m_sAddr.cstr (), "/path" );
	ASSERT_EQ ( tThird.m_iFamily, AF_UNIX );
	ASSERT_FALSE ( tThird.m_bBlackhole );
	ASSERT_FALSE ( tThird.m_bPersistent );
}
// staging...
// these classes are here only for tests (to avoid recompiling a big piece of code while experimenting)
// the most basic class we protect.
class Core_c
{
public:
	explicit Core_c ( int payload ) : m_iPayload (payload) {}
	int m_iPayload = 0;	// opaque payload the tests use to tell instances apart
};
// keeps a naked pointer to the base class and gives it to nobody
class Handler_t : public ISphRefcountedMT, Core_c
{
private:
	friend class HandlerL_t;	// only the RAII guard below may lock/unlock us

	// take the shared lock plus one reference; paired with Unlock()
	Core_c * ReadLock () const ACQUIRE_SHARED( m_tLock )
	{
		AddRef();
		m_tLock.ReadLock ();
		return ( Core_c * ) this;
	}

	// take the exclusive lock plus one reference; paired with Unlock()
	Core_c * WriteLock () const ACQUIRE( m_tLock )
	{
		AddRef();
		m_tLock.WriteLock ();
		return ( Core_c * )this;
	}

	// release the rwlock and the reference taken by Read/WriteLock()
	void Unlock () const UNLOCK_FUNCTION( m_tLock )
	{
		m_tLock.Unlock ();
		Release();
	}

	mutable RwLock_t m_tLock;

protected:
	// no manual deletion; lifetime managed by AddRef/Release()
	virtual ~Handler_t () = default;

public:
	Handler_t ( int payload )
		: Core_c { payload }
	{}
};
/// RAII shared read and write lock over Handler_t.
/// Holds the lock and one reference on the handler for the guard's lifetime.
class SCOPED_CAPABILITY HandlerL_t : ISphNoncopyable
{
public:
	// by default acquire read (shared) lock
	HandlerL_t ( Handler_t * pLock ) ACQUIRE_SHARED( pLock->m_tLock )
		: m_pCore { pLock->ReadLock () }, m_pLock { pLock }
	{}

	// acquire write (exclusive) lock
	HandlerL_t ( Handler_t * pLock, bool ) ACQUIRE ( pLock->m_tLock )
		: m_pCore { pLock->WriteLock () }, m_pLock { pLock }
	{}

	// explicit early unlock; the guard becomes empty afterwards
	void Unlock() RELEASE ()
	{
		if ( m_pLock )
			m_pLock->Unlock();

		m_pLock = nullptr;
		m_pCore = nullptr;
	}

	/// unlock on going out of scope
	~HandlerL_t () RELEASE ()
	{
		if ( m_pLock )
			m_pLock->Unlock();
	}

	// FIXED: the moved-from guard must be emptied here; previously rhs kept its
	// pointers, so its destructor performed a second Unlock()/Release() over the
	// same handler (double unlock + refcount underflow). Move-assignment below
	// already did this correctly.
	HandlerL_t ( HandlerL_t && rhs ) noexcept ACQUIRE ( rhs.m_pLock->m_tLock )
		: m_pCore { rhs.m_pCore }, m_pLock { rhs.m_pLock }
	{
		rhs.m_pCore = nullptr;
		rhs.m_pLock = nullptr;
	}

	HandlerL_t &operator= ( HandlerL_t &&rhs ) RELEASE () ACQUIRE ( rhs.m_pLock->m_tLock )
	{
		if ( &rhs==this )
			return *this;
		if ( m_pLock )
			m_pLock->Unlock ();
		m_pCore = rhs.m_pCore;
		m_pLock = rhs.m_pLock;
		rhs.m_pCore = nullptr;
		rhs.m_pLock = nullptr;
		return *this;
	}

public:
	Core_c * operator-> () const
	{ return m_pCore; }

	operator bool () const
	{ return m_pCore!=nullptr; }

	operator Core_c * () const
	{ return m_pCore; }

private:
	Core_c * m_pCore = nullptr;		// unlocked view of the payload
	Handler_t * m_pLock = nullptr;	// locked handler; nullptr when guard is empty
};
// factory: returns a read-locked guard over pLock (moved out by value)
HandlerL_t GetHandler ( Handler_t * pLock )
{
	return HandlerL_t (pLock);
}
// exercises the refcount bookkeeping of the Handler_t/HandlerL_t pair above
TEST ( new_addref_flavour, create_served_index_concept )
{
	int payload = 10;
	int payloadb = 13;
	auto pFoo = new Handler_t ( payload );
	auto pFee = new Handler_t ( payloadb );
	ASSERT_EQ ( pFoo->GetRefcount (), 1);

	// a->ReadLock();
	{
		HandlerL_t a = ( pFoo );	// read-locked guard, holds one extra reference
		ASSERT_EQ ( a->m_iPayload, 10 );
		// a = HandlerL_t ( pFee );
		// a.Unlock();
		HandlerL_t b ( pFee, true );	// write-locked guard over the second handler
		// dUser->Unlock();
		// ASSERT_STREQ ( dUser->m_sIndexPath.cstr (), "blabla" );
		ASSERT_EQ ( pFoo->GetRefcount (), 2 ) << "one from creation, second from RLocked";
	}
	HandlerL_t b ( pFoo );
	pFoo->Release();
	pFee->Release();
}
// fixture: a fresh ReadOnlyHash_T plus one live refcounted object per test;
// teardown verifies the hash released every reference it took
class TReadonlyHash_c: public ::testing::Test
{
protected:
	ReadOnlyHash_T<>* pHash = nullptr;
	RefCountedRefPtrGeneric_t pRef;		// a real refcounted payload
	RefCountedRefPtrGeneric_t pNullRef;	// intentionally empty pointer

	void SetUp() override
	{
		pHash = new ReadOnlyHash_T<>;
		pRef = new ISphRefcountedMT();
		ASSERT_TRUE ( pRef->IsLast() ) << "we are the one";
	}

	void TearDown() override
	{
		SafeDelete ( pHash );
		// fixed garbled diagnostic (was "we a the one")
		ASSERT_TRUE ( pRef->IsLast() ) << "hash deleted, we are the one";
	}
};
// Add() with an already-present key must be a no-op (no extra ref taken)
TEST_F ( TReadonlyHash_c, AddUniq )
{
	pHash->Add ( pRef, "hello" );
	ASSERT_EQ ( pRef->GetRefcount(), 2 ) << "one we, second hash";
	pHash->Add ( pRef, "hello" );
	ASSERT_EQ ( pRef->GetRefcount(), 2 ) << "no second addition";
	pHash->Add ( pRef, "world" );
	ASSERT_EQ ( pRef->GetRefcount(), 3 ) << "3 of us";
}

// AddOrReplace() of the same value under the same key must keep the refcount stable
TEST_F ( TReadonlyHash_c, AddOrReplace )
{
	pHash->AddOrReplace ( pRef, "hello" );
	ASSERT_EQ ( pRef->GetRefcount(), 2 ) << "one we, second hash";
	pHash->AddOrReplace ( pRef, "hello" );
	ASSERT_EQ ( pRef->GetRefcount(), 2 ) << "no second addition";
	pHash->AddOrReplace ( pRef, "world" );
	ASSERT_EQ ( pRef->GetRefcount(), 3 ) << "3 of us";
}
// Delete() must drop exactly one reference per removed key
TEST_F ( TReadonlyHash_c, Delete )
{
	// prepare: two keys share one value, so it is referenced 3 times (us + 2 slots)
	pHash->Add ( pRef, "hello" );
	pHash->Add ( pRef, "world" );
	EXPECT_EQ ( pRef->GetRefcount(), 3 );
	EXPECT_EQ ( pHash->GetLength(), 2 );

	// the test
	pHash->Delete ( "hello" );
	// fixed copy-pasted diagnostic (was "no second addition")
	ASSERT_EQ ( pRef->GetRefcount(), 2 ) << "one ref released together with the key";
	ASSERT_EQ ( pHash->GetLength(), 1 );

	pHash->Delete ( "world" );
	// fixed copy-pasted diagnostic (was "3 of us") - we must be the only owner again
	ASSERT_TRUE ( pRef->IsLast() ) << "we are the one again";
	ASSERT_EQ ( pHash->GetLength(), 0 );
}
// GetLength() must track additions and deletions, null values included
TEST_F ( TReadonlyHash_c, GetLength )
{
	pHash->Add ( pNullRef, "hello" );
	EXPECT_EQ ( pHash->GetLength(), 1 );
	pHash->Add ( pNullRef, "world" );
	EXPECT_EQ ( pHash->GetLength(), 2 );
	pHash->Delete ( "hello" );
	ASSERT_EQ ( pHash->GetLength(), 1 );
	pHash->Delete ( "world" );
	ASSERT_EQ ( pHash->GetLength(), 0 );
}
// Contains() must answer for both null and real values without touching refcounts
TEST_F ( TReadonlyHash_c, Contains )
{
	pHash->Add ( pNullRef, "hello" );	// key with a null value
	pHash->Add ( pRef, "world" );		// key with a real value
	EXPECT_EQ ( pRef->GetRefcount(), 2 );
	ASSERT_FALSE ( pHash->Contains ( "foo" ) ) << "foo wasn't inserted";
	ASSERT_TRUE ( pHash->Contains ( "hello" ) ) << "hello has null value";
	ASSERT_TRUE ( pHash->Contains ( "world" ) ) << "world is ok";
	// fixed typo in diagnostic (was "does'nt")
	ASSERT_EQ ( pRef->GetRefcount(), 2 ) << "contains doesn't affect ref at all";
}
// ReleaseAndClear() must empty the hash and release every held reference
TEST_F ( TReadonlyHash_c, ReleaseAndClear )
{
	pHash->Add ( pNullRef, "hello" );
	pHash->Add ( pRef, "world" );
	EXPECT_EQ ( pRef->GetRefcount(), 2 );
	EXPECT_EQ ( pHash->GetLength(), 2 );
	pHash->ReleaseAndClear();
	// fixed typo in diagnostic (was "anandoned")
	ASSERT_EQ ( pHash->GetLength(), 0 ) << "hash must be abandoned";
	ASSERT_TRUE ( pRef->IsLast() ) << "me was removed";
}
// Get() must addref the returned value; missing and null keys yield an empty ptr
TEST_F ( TReadonlyHash_c, Get )
{
	pHash->Add ( pNullRef, "hello" );
	pHash->Add ( pRef, "world" );
	EXPECT_EQ ( pRef->GetRefcount(), 2 );

	{ // get unexistent
		cRefCountedRefPtrGeneric_t pFoo { pHash->Get ( "foo" ) };
		ASSERT_FALSE ( pFoo ) << "wasn't in hash";
		EXPECT_TRUE ( !pFoo ) << "wasn't in hash";
		EXPECT_TRUE ( pFoo == nullptr ) << "wasn't in hash";
		ASSERT_EQ ( pRef->GetRefcount(), 2 );
	}
	ASSERT_EQ ( pRef->GetRefcount(), 2 );

	{ // get null
		cRefCountedRefPtrGeneric_t pFoo { pHash->Get ( "hello" ) };
		ASSERT_FALSE ( bool ( pFoo ) ) << "null in hash";
		ASSERT_EQ ( pRef->GetRefcount(), 2 );
	}
	ASSERT_EQ ( pRef->GetRefcount(), 2 );

	{ // get existing: +1 ref while the returned pointer is alive
		cRefCountedRefPtrGeneric_t pFoo { pHash->Get ( "world" ) };
		ASSERT_EQ ( pFoo->GetRefcount(), 3 );
		ASSERT_EQ ( pRef->GetRefcount(), 3 );
	}
	ASSERT_EQ ( pRef->GetRefcount(), 2 );
}
// field-wise equality of two listener descriptions (used by the tests below)
bool operator==( const ListenerDesc_t& lhs, const ListenerDesc_t& rhs )
{
	if ( lhs.m_eProto!=rhs.m_eProto || lhs.m_bVIP!=rhs.m_bVIP || lhs.m_bReadOnly!=rhs.m_bReadOnly )
		return false;
	if ( lhs.m_iPort!=rhs.m_iPort || lhs.m_iPortsCount!=rhs.m_iPortsCount || lhs.m_uIP!=rhs.m_uIP )
		return false;
	return lhs.m_sUnix==rhs.m_sUnix;
}
// table-driven check of ParseListener() over every address form (ip:port, bare
// port, unix socket, port range) crossed with every protocol suffix and _vip flag
TEST ( ParseListener, simple_ip_no_port )
{
	// expected fields: proto, unix path, <string>, uIP, port, ports-count, vip
	struct { const char* sSpec; ListenerDesc_t sRes; } dTable[] = {
		{"8.8.8.8:1000", { Proto_e::SPHINX, "", "", 134744072, 1000, 0, false }},
		{"1000", { Proto_e::SPHINX, "", "", 0, 1000, 0, false }},
		{"/linux/host", { Proto_e::SPHINX, "/linux/host", "", 0, 9312, 0, false }},
		{"8.8.8.8:1000-10000", { Proto_e::SPHINX, "", "", 134744072, 1000, 9001, false }},

		{"8.8.8.8:1000:sphinx", { Proto_e::SPHINXSE, "", "", 134744072, 1000, 0, false }},
		{"1000:sphinx", { Proto_e::SPHINXSE, "", "", 0, 1000, 0, false }},
		{"/linux/host:sphinx", { Proto_e::SPHINXSE, "/linux/host", "", 0, 9312, 0, false }},
		{"8.8.8.8:1000-10000:sphinx", { Proto_e::SPHINXSE, "", "", 134744072, 1000, 9001, false }},

		{"8.8.8.8:1000:mysql41", { Proto_e::MYSQL41, "", "", 134744072, 1000, 0, false }},
		{"1000:mysql41", { Proto_e::MYSQL41, "", "", 0, 1000, 0, false }},
		{"/linux/host:mysql41", { Proto_e::MYSQL41, "/linux/host", "", 0, 9312, 0, false }},
		{"8.8.8.8:1000-10000:mysql41", { Proto_e::MYSQL41, "", "", 134744072, 1000, 9001, false }},

		{"8.8.8.8:1000:http", { Proto_e::HTTP, "", "", 134744072, 1000, 0, false }},
		{"1000:http", { Proto_e::HTTP, "", "", 0, 1000, 0, false }},
		{"/linux/host:http", { Proto_e::HTTP, "/linux/host", "", 0, 9312, 0, false }},
		{"8.8.8.8:1000-10000:http", { Proto_e::HTTP, "", "", 134744072, 1000, 9001, false }},

		{"8.8.8.8:1000:replication", { Proto_e::REPLICATION, "", "", 134744072, 1000, 0, false }},
		{"1000:replication", { Proto_e::REPLICATION, "", "", 0, 1000, 0, false }},
		{"/linux/host:replication", { Proto_e::REPLICATION, "/linux/host", "", 0, 9312, 0, false }},
		{"8.8.8.8:1000-10000:replication", { Proto_e::REPLICATION, "", "", 134744072, 1000, 9001, false }},

		{"8.8.8.8:1000:sphinx_vip", { Proto_e::SPHINXSE, "", "", 134744072, 1000, 0, true }},
		{"1000:sphinx_vip", { Proto_e::SPHINXSE, "", "", 0, 1000, 0, true }},
		{"/linux/host:sphinx_vip", { Proto_e::SPHINXSE, "/linux/host", "", 0, 9312, 0, true }},
		{"8.8.8.8:1000-10000:sphinx_vip", { Proto_e::SPHINXSE, "", "", 134744072, 1000, 9001, true }},

		{"8.8.8.8:1000:mysql41_vip", { Proto_e::MYSQL41, "", "", 134744072, 1000, 0, true }},
		{"1000:mysql41_vip", { Proto_e::MYSQL41, "", "", 0, 1000, 0, true }},
		{"/linux/host:mysql41_vip", { Proto_e::MYSQL41, "/linux/host", "", 0, 9312, 0, true }},
		{"8.8.8.8:1000-10000:mysql41_vip", { Proto_e::MYSQL41, "", "", 134744072, 1000, 9001, true }},

		{"8.8.8.8:1000:http_vip", { Proto_e::HTTP, "", "", 134744072, 1000, 0, true }},
		{"1000:http_vip", { Proto_e::HTTP, "", "", 0, 1000, 0, true }},
		{"/linux/host:http_vip", { Proto_e::HTTP, "/linux/host", "", 0, 9312, 0, true }},
		{"8.8.8.8:1000-10000:http_vip", { Proto_e::HTTP, "", "", 134744072, 1000, 9001, true }},

		{"8.8.8.8:1000:replication_vip", { Proto_e::REPLICATION, "", "", 134744072, 1000, 0, true }},
		{"1000:replication_vip", { Proto_e::REPLICATION, "", "", 0, 1000, 0, true }},
		{"/linux/host:replication_vip", { Proto_e::REPLICATION, "/linux/host", "", 0, 9312, 0, true }},
		{"8.8.8.8:1000-10000:replication_vip", { Proto_e::REPLICATION, "", "", 134744072, 1000, 9001, true }},
	};

	for (const auto& sCase : dTable)
	{
#if _WIN32
		if ( sCase.sSpec[0]=='/' ) // skip of UNIX socket cases on Windows
			continue;
#endif
		ListenerDesc_t tDesc = ParseListener( sCase.sSpec );
		EXPECT_TRUE ( tDesc==sCase.sRes ) << sCase.sSpec;
	}
}
// every out-of-range port (65536) must produce the same fatal message,
// regardless of protocol suffix, _vip flag, or port-range form
TEST_F ( DeathLogger_c, ParseListener_wrong_port )
{
	struct
	{
		const char* sSpec;
		ListenerDesc_t sRes;
	} dTable[] = {
		{ "8.8.8.8:65536", { Proto_e::SPHINX, "", 0, 9306, 0, false }},
		{ "65536", { Proto_e::SPHINX, "", 0, 9306, 0, false }},
		{ "8.8.8.8:1000-65536", { Proto_e::SPHINX, "", 0, 1000, 0, false }},

		{ "8.8.8.8:65536:sphinx", { Proto_e::SPHINX, "", 0, 9306, 0, false }},
		{ "65536:sphinx", { Proto_e::SPHINX, "", 0, 9306, 0, false }},
		{ "8.8.8.8:1000-65536:sphinx", { Proto_e::SPHINX, "", 0, 9306, 0, false }},

		{ "8.8.8.8:65536:mysql41", { Proto_e::MYSQL41, "", 0, 9306, 0, false }},
		{ "65536:mysql41", { Proto_e::MYSQL41, "", 0, 9306, 0, false }},
		{ "8.8.8.8:1000-65536:mysql41", { Proto_e::MYSQL41, "", 0, 9306, 0, false }},

		{ "8.8.8.8:65536:http", { Proto_e::HTTP, "", 0, 9306, 0, false }},
		{ "65536:http", { Proto_e::HTTP, "", 0, 9306, 0, false }},
		{ "8.8.8.8:1000-65536:http", { Proto_e::HTTP, "", 0, 9306, 0, false }},

		{ "8.8.8.8:65536:replication", { Proto_e::REPLICATION, "", 0, 9306, 0, false }},
		{ "65536:replication", { Proto_e::REPLICATION, "", 0, 9306, 0, false }},
		{ "8.8.8.8:1000-65536:replication", { Proto_e::REPLICATION, "", 0, 9306, 0, false }},

		{ "8.8.8.8:65536:sphinx_vip", { Proto_e::SPHINX, "", 0, 9306, 0, false }},
		{ "65536:sphinx_vip", { Proto_e::SPHINX, "", 0, 9306, 0, false }},
		{ "8.8.8.8:1000-65536:sphinx_vip", { Proto_e::SPHINX, "", 0, 9306, 0, false }},

		{ "8.8.8.8:65536:mysql41_vip", { Proto_e::MYSQL41, "", 0, 9306, 0, false }},
		{ "65536:mysql41_vip", { Proto_e::MYSQL41, "", 0, 9306, 0, false }},
		{ "8.8.8.8:1000-65536:mysql41_vip", { Proto_e::MYSQL41, "", 0, 9306, 0, false }},

		{ "8.8.8.8:65536:http_vip", { Proto_e::HTTP, "", 0, 9306, 0, false }},
		{ "65536:http_vip", { Proto_e::HTTP, "", 0, 9306, 0, false }},
		{ "8.8.8.8:1000-65536:http_vip", { Proto_e::HTTP, "", 0, 9306, 0, false }},

		{ "8.8.8.8:65536:replication_vip", { Proto_e::REPLICATION, "", 0, 9306, 0, false }},
		{ "65536:replication_vip", { Proto_e::REPLICATION, "", 0, 9306, 0, false }},
		{ "8.8.8.8:1000-65536:replication_vip", { Proto_e::REPLICATION, "", 0, 9306, 0, false }},
	};

	for ( const auto& sCase : dTable )
	{
		CSphString sFatal;
		ParseListener ( sCase.sSpec, &sFatal );
		EXPECT_STREQ ( "port 65536 is out of range", sFatal.cstr() );
	}
}
| 25,584
|
C++
|
.cpp
| 688
| 34.886628
| 119
| 0.659329
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
16,997
|
gtests_globalstate.cpp
|
manticoresoftware_manticoresearch/src/gtests/gtests_globalstate.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include <gtest/gtest.h>
#include "sphinxint.h"
#include "threadutils.h"
#include "tracer.h"
// global stuff
const char * g_sTmpfile = "__libsphinxtest.tmp";		// plain synonyms fixture file
const char * g_sMagickTmpfile = "__libsphinxtest2.tmp";	// synonyms fixture with the extra magic rule
const char * g_sMagic = "\xD1\x82\xD0\xB5\xD1\x81\xD1\x82\xD1\x82\xD1\x82";	// UTF-8 Cyrillic sample token
// Write a synonyms fixture file for the tokenizer tests. Without sMagic the
// plain rule set goes to g_sTmpfile; with sMagic an extra "<magic> => test"
// rule is appended and the file goes to g_sMagickTmpfile instead.
// Returns false when the file cannot be opened for writing.
bool CreateSynonymsFile ( const char * sMagic = nullptr )
{
	const char * szName = sMagic ? g_sMagickTmpfile : g_sTmpfile;
	FILE * fp = fopen ( szName, "w+" );
	if ( !fp )
		return false;

	// the static rule set contains no format specifiers, so plain fputs is enough
	fputs ( "AT&T => AT&T\n"
		" AT & T => AT & T \n"
		"standarten fuehrer => Standartenfuehrer\n"
		"standarten fuhrer => Standartenfuehrer\n"
		"OS/2 => OS/2\n"
		"Ms-Dos => MS-DOS\n"
		"MS DOS => MS-DOS\n"
		"feat. => featuring\n"
		"U.S. => US\n"
		"U.S.A. => USA\n"
		"U.S.B. => USB\n"
		"U.S.D. => USD\n"
		"U.S.P. => USP\n"
		"U.S.A.F. => USAF\n"
		"life:) => life:)\n"
		"; => ;\n", fp );

	if ( sMagic )
		fprintf ( fp, "%s => test\n", sMagic );

	fclose ( fp );
	return true;
}
// global gtest environment: brings up threading, the tracer, the coroutine
// work pool, fixture files and (on Windows) WinSock before any test runs
class Environment : public ::testing::Environment
{
public:
	// Override this to define how to set up the environment.
	void SetUp () override
	{
		char cTopOfMainStack;	// marks the top of the main stack for the thread layer
		Threads::Init ();
		Threads::PrepareMainThread ( &cTopOfMainStack );
		Tracer::Init();
		CreateSynonymsFile ();
		CreateSynonymsFile ( g_sMagic );
		auto iThreads = GetNumLogicalCPUs();
		// iThreads = 1; // uncomment if want to run all coro tests in single thread
		SetMaxChildrenThreads ( iThreads );
		StartGlobalWorkPool();
		WipeGlobalSchedulerOnShutdownAndFork();

#if _WIN32
		// init WSA on Windows
		WSADATA wsa_data;
		int wsa_startup_err;

		wsa_startup_err = WSAStartup ( WINSOCK_VERSION, &wsa_data );
		if ( wsa_startup_err )
			printf ( "failed to initialize WinSock2: error %d", wsa_startup_err );
#endif
	}

	// Override this to define how to tear down the environment.
	void TearDown () override
	{
		unlink ( g_sTmpfile );
		unlink ( g_sMagickTmpfile );
		StopGlobalWorkPool();
	}
};

// it will create 2 synonyms file before all tests (globally), and delete them on finish.
::testing::Environment VARIABLE_IS_NOT_USED * const env = ::testing::AddGlobalTestEnvironment ( new Environment );
| 2,621
|
C++
|
.cpp
| 82
| 29.585366
| 114
| 0.684731
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
16,998
|
gtests_threadstuff.cpp
|
manticoresoftware_manticoresearch/src/gtests/gtests_threadstuff.cpp
|
//
// Copyright (c) 2019-2024, Manticore Software LTD (https://manticoresearch.com)
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include <gtest/gtest.h>
#include "threadutils.h"
#include "coroutine.h"
#include "task_dispatcher.h"
#include <atomic>
void SetStderrLogger ();
// here intentionally added initialized lambda capture
// it will always show compiler warning on ancient compilers
TEST ( ThreadPool, movelambda )
{
	CSphString test = "hello";
	CSphString to;
	const char* sOrigHello = test.cstr();
	// init-capture moves 'test' into the closure immediately, not at call time
	auto x = [&, line = std::move ( test )] () mutable { to = std::move(line); };
	ASSERT_STREQ ( test.cstr (), NULL ) << "Line is already captured";
	ASSERT_STREQ ( to.cstr (), NULL ) << "line is not yet finally moved";
	x ();
	ASSERT_STREQ ( to.cstr (), "hello" ) << "lambda moved captured line to here";
	ASSERT_EQ ( sOrigHello, to.cstr() ) << "moved value is exactly one from the start";
}
// 100 increments scheduled on a 4-thread pool must all land before StopAll() returns
TEST ( ThreadPool, Counter100 )
{
	// SetStderrLogger ();
	auto pPool = Threads::MakeThreadPool ( 4, "tp" );
	auto & tPool = *pPool;
	std::atomic<int> v {0};
	for ( int i=0; i<100; ++i)
		tPool.Schedule ([&] { ++v; }, false);
	tPool.StopAll ();
	ASSERT_EQ ( v, 100 );
}
// Spawn 100 coroutines, each atomically bumping a counter, wait for all of
// them through a deferred waiter, and verify no increment was lost.
void Counter100c()
{
	using namespace Threads;
	std::atomic<int> v {0};
	CallCoroutine ( [&] {
		auto dWaiter = DefferedContinuator();
		for ( int i = 0; i < 100; ++i )
			Coro::Co ( [&] {
				v.fetch_add(1,std::memory_order_relaxed);
			},
			dWaiter );

		WaitForDeffered ( std::move(dWaiter) );
		ASSERT_EQ ( v, 100 );
	} );
}
// repeat the coroutine counter scenario many times to shake out races
TEST ( ThreadPool, Counter100c )
{
	for (auto i=0; i<100; ++i)
		Counter100c();
}
// name of the current coroutine scheduler, or "(null)" outside of any scheduler
const char* SH()
{
	auto pSched = Threads::Coro::CurrentScheduler();
	return pSched ? pSched->Name() : "(null)";
}
// coro-friendly sleep which reports the waiter's name once it wakes up
void Sleeper(int iMsec, const char* szName)
{
	Threads::Coro::SleepMsec ( iMsec );
	std::cout << szName << " run " << iMsec << "\n";
}
// this test will NOT pass in single-thread (see gtests_globalstate.cpp, if iThreads<2).
// that is because it uses the true sphSleepMsec, which effectively pauses the single thread.
TEST ( ThreadPool, WaitForN )
{
	using namespace Threads;
	Threads::CallCoroutine ( [&] {
		int N = 2;
		std::cout << "test started waiting " << N << "th\n";
		// with sleeps of 1500/500/2000 msec the N-th (2nd) job to finish is #0,
		// which is the index WaitForN is expected to return
		auto iIdx = WaitForN ( N, {
			[] {
				Sleeper (1500, "first");
			},
			[] {
				Sleeper ( 500, "second" );
			},
			[] {
				Sleeper ( 2000, "third" );
			}}
		);
		ASSERT_EQ ( iIdx, 0 );
		std::cout << "test finished, idx=" << iIdx;
	});
}
// an 'alone' scheduler backed by a single-thread pool must run tasks strictly
// in submission order despite the random per-task sleeps
TEST ( ThreadPool, strandr )
{
	g_eLogLevel = SPH_LOG_VERBOSE_DEBUG;
	static const int NUMS = 100;
	CSphVector<int> dRes;
	dRes.Reserve ( NUMS );
	// SetStderrLogger ();
	auto pSched = Threads::MakeThreadPool ( 1, "tp" );
	auto pRandr = Threads::MakeAloneScheduler ( nullptr );
	((Threads::SchedulerWithBackend_i&) *pRandr).SetBackend (pSched);
	for ( int i = 0; i<NUMS; ++i )
	{
		pRandr->Schedule ( [i,&dRes]
		{
			dRes.Add(i);
			sphSleepMsec ( sphRand () & 15);
		}, false );
		sphSleepMsec ( 10 );
	}
	sphSleepMsec (10);
	pSched->StopAll ();
	// all tasks executed, in exactly the order they were scheduled
	ASSERT_EQ ( dRes.GetLength(), NUMS );
	ARRAY_CONSTFOREACH( i, dRes )
	{
		ASSERT_EQ ( i, dRes[i] );
	}
}
// check that strandr really produces sequential work
TEST ( ThreadPool, strandr2 )
{
	using namespace Threads;
	g_eLogLevel = SPH_LOG_VERBOSE_DEBUG;
	static const int NUMS = 100;
	CSphVector<int> dRes;
	dRes.Reserve ( NUMS );
	Threads::CallCoroutine ( [&] {
		auto dWaiter = DefferedRestarter ();
		RoledSchedulerSharedPtr_t pRandr = Threads::MakeAloneScheduler ( Coro::CurrentScheduler () );
		Threads::ScopedScheduler_c customtp { pRandr };
		for ( int i = 0; i<NUMS; ++i )
		{
			// commenting out line below will cause test to fail.
			Coro::Co ( [&,i] {
				sphSleepMsec ( sphRand () % NUMS );
				dRes.Add ( i );
			}, dWaiter );
		}
		WaitForDeffered ( std::move ( dWaiter ) );
	});
	// despite the random sleeps, the alone scheduler must keep submission order
	ARRAY_CONSTFOREACH( i, dRes )
	{
		ASSERT_EQ ( i, dRes[i] );
	}
	ASSERT_EQ ( dRes.GetLength (), NUMS );
}
// checks that strandr is re-enterable, i.e. that Coro::Reschedule does NOT cause stack overflow.
// DISABLED because it is manual; otherwise it produces a lot of noise
/*TEST ( ThreadPool, DISABLED_strandr_reschedule )
{
using namespace Threads;
g_eLogLevel = SPH_LOG_VERBOSE_DEBUG;
static const int NUMS = 700;
Threads::CallCoroutine ( [&] {
RoledSchedulerSharedPtr_t pRandr = Threads::MakeAloneScheduler ( Coro::CurrentScheduler () );
std::cout << "enter scoped\n";
{
Threads::ScopedScheduler_c customtp { pRandr };
std::cout << "scope entered\n";
for ( int j = 0; j<NUMS; ++j )
{
Coro::Reschedule ();
std::cout << "rescheduled " << j << "\n";
}
std::cout << "done\n";
}
std::cout << "strandr escaped\n";
});
}*/
// from here on, it is preparation for the next test...
// a simulated resource (chunk/segment): who owns it and how many tasks hold it
struct essence_t
{
	int iOwner = -1;		// index of the owning worker, -1 when free
	int iConcurrency = 0;
	int iRefs = 0;			// number of tasks currently holding this resource
};

// a simulated worker: its serializing scheduler and its load counter
struct thread_t
{
	Threads::RoledSchedulerSharedPtr_t strandr;
	std::atomic<int> iWorks {0};	// how many resources are currently bound here
};

static const int NWORKERS = 5;	// size of the simulated worker pool
static const int NUMS = 10;		// number of simulated resources
const char* names[NWORKERS] = {"w1","w2","w3","w4","w5"};
// dump every worker's load and whether its scheduler has been created yet
void print_owners ( thread_t * pthreads )
{
	std::cout << "[";
	int iWorker = 0;
	while ( iWorker<NWORKERS )
	{
		const thread_t & tWorker = pthreads[iWorker];
		std::cout << tWorker.iWorks << ":" << ( tWorker.strandr ? "valid" : "nullptr" ) << ", ";
		++iWorker;
	}
	std::cout << "]";
}
// Pick the least-loaded worker (by iWorks), lazily creating its scheduler on
// first use. Returns the chosen worker's index.
int getworker ( int N, thread_t* pthreads )
{
	using namespace Threads;
	int iIdx=0;
	int iWorks=1000000;	// sentinel, larger than any plausible load
	std::cout << SH () << " " << N << ": getworker: "; print_owners(pthreads);
	for ( int i = 0; i<NWORKERS; ++i )
	{
		if ( iWorks>pthreads[i].iWorks )
		{
			iWorks = pthreads[i].iWorks;
			iIdx = i;
		}
	}
	if ( !pthreads[iIdx].strandr )
	{
		pthreads[iIdx].strandr = MakeAloneScheduler ( Coro::CurrentScheduler (), names[iIdx] );
		std::cout << " new ";
	}
	std::cout << "give " << iIdx << ":" << iWorks << "\n";
	return iIdx;
}
// Bind resource iResource to worker iWorker, cooperatively waiting (via
// rescheduling) while another worker owns it. Bumps the resource's refcount
// and, for the first reference, the worker's load counter.
// FIXED: removed the unused local `StringBuilder_c cout;` - it was declared
// but never used (all output below goes through std::cout explicitly).
void bind_resource ( int N, essence_t* presource, int iResource, thread_t * pthreads, int iWorker )
{
	auto& resource = presource[iResource];
	if ( resource.iOwner!=iWorker )
	{
		if ( resource.iOwner!=-1 )
		{
			// busy by another worker - yield until released or handed over to us
			std::cout << SH () << " " << N << ": resource owner of " << iResource << " is fiber_" << resource.iOwner << ", wait...\n";
			while ( resource.iOwner!=iWorker && resource.iOwner!=-1 )
				Threads::Coro::Reschedule ();
			std::cout << SH () << " " << N << ": wait of " << iResource << " done, owner is fiber_" << resource.iOwner << ".\n";
		}
		resource.iOwner = iWorker;
	}

	++resource.iRefs;
	if ( resource.iRefs==1 )
		++pthreads[iWorker].iWorks;

	std::cout << SH () << " " << N << ": resource owner of " << iResource << " is fiber_" << iWorker << " (has " << pthreads[iWorker].iWorks << ")\n";
}
// dump owner:refs for every resource; "NONE" marks an unowned resource
void print_refs ( essence_t* presource)
{
	std::cout<<"[";
	for ( int iRes = 0; iRes<NUMS; ++iRes )
	{
		const essence_t & tRes = presource[iRes];
		if ( tRes.iOwner==-1 )
		{
			std::cout << "NONE, ";
			continue;
		}
		std::cout << tRes.iOwner << ":" << tRes.iRefs << ", ";
	}
	std::cout << "]";
}
// Drop one reference from resource i; when the last reference goes away the
// resource is freed (owner reset to -1). Returns 1 if the resource was freed
// (so the caller can decrement the worker's load), 0 otherwise.
int release_resource ( int N, essence_t* presource, int i )
{
	auto& resource = presource[i];
	std::cout << SH () << " " << N << ": release_resource " << i << ":";
	print_refs(presource);
	--resource.iRefs;
	int ires = 0;
	if (!resource.iRefs)
	{
		resource.iOwner = -1;
		ires = 1;
	}
	std::cout << " -> ";
	print_refs ( presource );
	std::cout<< "\n";
	return ires;
}
// here is the test of the RT-index chunks/segs dispatcher.
/*
 * We have N resources (chunks/segs) and M threads.
 * We have X tasks which are started in parallel.
 *
 * 1. For a couple of resources, the task dispatcher provides one executor out of the M threads.
 * 2. If one of the resources is busy, it provides a backoff strategy (yield and resume when the state changes) without a bunch of
 * empty CPU loops.
 * 3. When executing tasks in parallel we check that only one executor processes a resource at a time.
 * 4. Finally we check: all tasks processed; all resources released; all threads abandoned.
 *
 * Executing the test in a single thread will obviously pass it (as only one task is executed at a time), but in that case we
 * check that there are no deadlocks - i.e. when we wait for resources to be released, but the worker which occupies them, in turn,
 * waits until we release it ourselves (and since there is a single thread, that would be a deadlock).
 *
 * The test is generally disabled since it is quite long, and also quite noisy. Re-enable and run manually, if necessary!
 */
/*TEST ( ThreadPool, DISABLED_strandr3 )
{
using namespace Threads;
essence_t resources[NUMS];
thread_t workers[NWORKERS];
Threads::CallCoroutine ( [&] {
auto dWaiter = DefferedRestarter ();
RoledSchedulerSharedPtr_t pRandr = Threads::MakeAloneScheduler ( Coro::CurrentScheduler (),"SH" );
std::cout << "started...\n";
int64_t iMaxTries = 0;
for ( int i=0; i<1000; ++i)
{
Coro::Co ( [&,i] {
// select couple of resources (random)
auto a = ( sphRand () % NUMS );
auto b = a;
while (b==a)
b = ( sphRand () % NUMS );
int iOwner;
// that is dispatcher. It selects ready worker, or creates new one.
		// if both resources are busy, it waits until their state changes and checks again in a loop.
{
// line below organize execution in one 'scheduler' thread.
Threads::ScopedScheduler_c customtp { pRandr };
std::cout << SH () << " " << i << ": try to work with " << a << "(" << resources[a].iOwner << ") and " << b << "("
<< resources[b].iOwner << ")\n";
bool bHasWorker = false;
int64_t iLoops = 0;
// if both resources are free - select or create worker.
// if one free - bind it to the worker of the second one.
				// if both are busy - backoff. Wait until either worker finishes and restart the selector loop from scratch.
while (!bHasWorker)
{
auto workera = resources[a].iOwner;
auto workerb = resources[b].iOwner;
bHasWorker = true;
if (workera==-1 || workerb==-1) // one or both are not owned at all - may steal it now!
{
if ( workera==-1 && workerb==-1 ) // no both owners, make new one
iOwner = getworker ( i, workers );
else // one is not owned - bind to 2-nd.
iOwner = ( workera==-1 ) ? workerb : workera;
} else {
if ( workera==workerb )
iOwner = workera;
else {
bHasWorker = false;
++iLoops;
// backoff: schedule 2 tasks, each in the worker of each resource. When any fired,
// restart the loop.
						// Another way is just Coro::Reschedule(), but that would burn the dispatcher's CPU core in vain.
WaitForN ( 1, {
[t=workers[workera].strandr] { ScopedScheduler_c _ { t };},
[t=workers[workerb].strandr] { ScopedScheduler_c _ { t };}
});
std::cout << SH () << ": try " << iLoops << " (" << a << " " << b << ")\n";
continue;
}
}
}
			// here the target thread (executor) is selected. We still work in the single scheduler thread, so we will bind
// resources to the executor without any locks, since there is no concurrency in current context.
iMaxTries = Max ( iMaxTries, iLoops );
std::cout << SH () << " " <<i << ": will work with " << a << "(" << resources[a].iOwner << ") and " << b << "("
<< resources[b].iOwner << ")";
if ( iLoops!=0 )
std::cout << " in " << iLoops << " tries\n";
else
std::cout << "\n";
bind_resource ( i, resources, a, workers, iOwner );
bind_resource ( i, resources, b, workers, iOwner );
std::cout << SH () << " " << i <<": -> in fiber_" << iOwner << " (" << workers[iOwner].iWorks << " tasks)";
print_refs(resources);
print_owners( workers);
std::cout<<"\n";
}
// here is wild (multi-threaded) context with concurrency.
			// the line below might be uncommented for a pinch of randomness
// sphSleepMsec ( 50+sphRand()%100 );
// from line below we settle in one of the workers. Here is the test of how dispatcher provides us
// single resource which we will access without concurrency, because there is no intersection between
// sets of resources among different executors.
Threads::ScopedScheduler_c customtp { workers[iOwner].strandr };
std::cout << SH () << " " << i <<": " << " fiber_" << iOwner << ": " << a << " " << b << "\n";
++resources[a].iConcurrency;
++resources[b].iConcurrency;
ASSERT_EQ ( 1, resources[a].iConcurrency );
ASSERT_EQ ( 1, resources[b].iConcurrency );
std::cout << SH () << " " << i << ": " << " in use: " << resources[a].iConcurrency << "/" << resources[b].iConcurrency << "\n";
ASSERT_EQ ( 1, resources[a].iConcurrency );
ASSERT_EQ ( 1, resources[b].iConcurrency );
--resources[a].iConcurrency;
--resources[b].iConcurrency;
{ // releasing
// Threads::ScopedScheduler_c customtp { pRandr };
workers[iOwner].iWorks -= release_resource (i,resources,a) + release_resource(i,resources,b);
std::cout << SH () << " " << i << ": fiber_" << iOwner << " released "
<< a << "(" << resources[a].iRefs << ") and "
<< b << "(" << resources[b].iRefs << "), rest " << workers[iOwner].iWorks << "\n";
}
}
, dWaiter );
}
// wait all tasks to finish.
WaitForDeffered ( std::move ( dWaiter ) );
std::cout << "finished, "<< iMaxTries << " max tries\n";
// check that everything is correctly released.
for ( auto & resource: resources )
{
ASSERT_EQ ( 0, resource.iConcurrency );
ASSERT_EQ ( -1, resource.iOwner );
ASSERT_EQ ( 0, resource.iRefs );
}
for ( auto & worker : workers )
ASSERT_EQ ( 0, worker.iWorks );
} );
}*/
// Coroutine-as-generator concept: MakeCoroExecutor wraps a coroutine body;
// each call of the returned executor resumes it until the next Yield_() and
// reports whether the coroutine has finished. Here it backs a "condition"
// functor that exposes a fresh value of iData on every invocation.
TEST ( ThreadPool, CoroPromiceFutureConcept )
{
	using namespace Threads;
	CallCoroutine ( [&] {
		volatile int iData;
		// producer: publishes 1, 2, 10, then finishes with 16
		auto fnCoro = MakeCoroExecutor ( [&iData]() {
			iData = 1;
			Coro::Yield_();
			iData = 2;
			Coro::Yield_();
			iData = 10;
			Coro::Yield_();
			iData = 16;
		} );
		// returns true while the coroutine is still suspended (more values pending),
		// false once it ran to completion; copies the current value out to iData2
		auto fnCondition = [fnCoro = std::move ( fnCoro ), &iData] ( int& iData2 ) -> bool {
			bool bRes = !fnCoro();
			iData2 = iData;
			return bRes;
		};
		int iCheck;
		ASSERT_TRUE ( fnCondition ( iCheck ) );
		ASSERT_EQ ( iCheck, 1 );
		ASSERT_TRUE ( fnCondition ( iCheck ) );
		ASSERT_EQ ( iCheck, 2 );
		ASSERT_TRUE ( fnCondition ( iCheck ) );
		ASSERT_EQ ( iCheck, 10 );
		// final resume: the coroutine completes, so the condition reports false
		ASSERT_FALSE ( fnCondition ( iCheck ) );
		ASSERT_EQ ( iCheck, 16 );
	});
}
// Trivial dispatcher over 6 jobs: a plain shared counter. A fetched but not
// consumed job (out-param left >= 0) is re-offered on the next fetch, even
// from a different source, and sources may outnumber the declared concurrency.
TEST ( Dispatcher, Trivial )
{
	auto pDispatcher = Dispatcher::MakeTrivial(6, 3);
	auto pSrc1 = pDispatcher->MakeSource();
	int iTask = -1;
	ASSERT_TRUE ( pSrc1->FetchTask ( iTask ) );
	ASSERT_EQ ( iTask, 0 );
	// not consumed yet, so the same job comes back
	ASSERT_TRUE ( pSrc1->FetchTask ( iTask ) );
	ASSERT_EQ ( iTask, 0 );
	iTask = -1;
	ASSERT_TRUE ( pSrc1->FetchTask ( iTask ) );
	ASSERT_EQ ( iTask, 1 );
	auto pSrc2 = pDispatcher->MakeSource();
	auto pSrc3 = pDispatcher->MakeSource();
	// a trivial dispatcher allows sources beyond the initial concurrency
	auto pSrc4 = pDispatcher->MakeSource();
	iTask = -1;
	ASSERT_TRUE ( pSrc2->FetchTask ( iTask ) );
	ASSERT_EQ ( iTask, 2 );
	iTask = -1;
	ASSERT_TRUE ( pSrc3->FetchTask ( iTask ) );
	ASSERT_EQ ( iTask, 3 );
	iTask = -1;
	ASSERT_TRUE ( pSrc4->FetchTask ( iTask ) );
	ASSERT_EQ ( iTask, 4 );
	iTask = -1;
	ASSERT_TRUE ( pSrc1->FetchTask ( iTask ) );
	ASSERT_EQ ( iTask, 5 );
	// still unconsumed: another source sees the very same job
	ASSERT_TRUE ( pSrc3->FetchTask ( iTask ) );
	ASSERT_EQ ( iTask, 5 );
	iTask = -1;
	// all six jobs have been handed out by now
	ASSERT_FALSE ( pSrc3->FetchTask ( iTask ) );
}
// Round-robin dispatcher: 8 jobs striped over 2 lanes with default batch 1,
// so lane A serves the even jobs and lane B the odd ones; a third source
// finds no free lane and gets nothing.
TEST ( Dispatcher, RoundRobin_batch_1 )
{
	auto pDispatcher = Dispatcher::MakeRoundRobin ( 8, 2 );
	auto pSrcA = pDispatcher->MakeSource();
	int iTask = -1;
	ASSERT_TRUE ( pSrcA->FetchTask ( iTask ) );
	ASSERT_EQ ( iTask, 0 );
	// not consumed yet, so the same job comes back
	ASSERT_TRUE ( pSrcA->FetchTask ( iTask ) );
	ASSERT_EQ ( iTask, 0 );
	iTask = -1;
	ASSERT_TRUE ( pSrcA->FetchTask ( iTask ) );
	ASSERT_EQ ( iTask, 2 );
	iTask = -1;
	ASSERT_TRUE ( pSrcA->FetchTask ( iTask ) );
	ASSERT_EQ ( iTask, 4 );
	auto pSrcB = pDispatcher->MakeSource();
	auto pSrcC = pDispatcher->MakeSource();
	iTask = -1;
	ASSERT_TRUE ( pSrcB->FetchTask ( iTask ) );
	ASSERT_EQ ( iTask, 1 );
	iTask = -1;
	ASSERT_TRUE ( pSrcB->FetchTask ( iTask ) );
	ASSERT_EQ ( iTask, 3 );
	iTask = -1;
	ASSERT_TRUE ( pSrcB->FetchTask ( iTask ) );
	ASSERT_EQ ( iTask, 5 );
	// third source has no lane of its own - fetch fails
	iTask = -1;
	ASSERT_FALSE ( pSrcC->FetchTask ( iTask ) );
	iTask = -1;
	ASSERT_TRUE ( pSrcB->FetchTask ( iTask ) );
	ASSERT_EQ ( iTask, 7 );
	// odd lane exhausted
	iTask = -1;
	ASSERT_FALSE ( pSrcB->FetchTask ( iTask ) );
	iTask = -1;
	ASSERT_TRUE ( pSrcA->FetchTask ( iTask ) );
	ASSERT_EQ ( iTask, 6 );
	// even lane exhausted as well
	iTask = -1;
	ASSERT_FALSE ( pSrcA->FetchTask ( iTask ) );
}
// Round-robin dispatcher with an explicit batch of 3: 8 jobs over 2 lanes,
// handed out in runs of three, so lane A owns {0,1,2} and later {6,7},
// lane B owns {3,4,5}; a third source again gets nothing.
TEST ( Dispatcher, RoundRobin_batch_2 )
{
	auto pDispatcher = Dispatcher::MakeRoundRobin ( 8, 2, 3 );
	auto pSrcA = pDispatcher->MakeSource();
	int iTask = -1;
	ASSERT_TRUE ( pSrcA->FetchTask ( iTask ) );
	ASSERT_EQ ( iTask, 0 );
	// not consumed yet, so the same job comes back
	ASSERT_TRUE ( pSrcA->FetchTask ( iTask ) );
	ASSERT_EQ ( iTask, 0 );
	iTask = -1;
	ASSERT_TRUE ( pSrcA->FetchTask ( iTask ) );
	ASSERT_EQ ( iTask, 1 );
	// park the first source and spin up two more
	auto pSrcB = pDispatcher->MakeSource();
	auto pSrcC = pDispatcher->MakeSource();
	iTask = -1;
	ASSERT_TRUE ( pSrcB->FetchTask ( iTask ) );
	ASSERT_EQ ( iTask, 3 );
	iTask = -1;
	ASSERT_TRUE ( pSrcB->FetchTask ( iTask ) );
	ASSERT_EQ ( iTask, 4 );
	// third source owns no batch - fetch fails
	iTask = -1;
	ASSERT_FALSE ( pSrcC->FetchTask ( iTask ) );
	// drain the second source; its share is jobs 3..5 of the 8
	iTask = -1;
	ASSERT_TRUE ( pSrcB->FetchTask ( iTask ) );
	ASSERT_EQ ( iTask, 5 );
	// second source exhausted
	iTask = -1;
	ASSERT_FALSE ( pSrcB->FetchTask ( iTask ) );
	// drain the first source: 2 from its first batch, then 6 and 7
	iTask = -1;
	ASSERT_TRUE ( pSrcA->FetchTask ( iTask ) );
	ASSERT_EQ ( iTask, 2 );
	iTask = -1;
	ASSERT_TRUE ( pSrcA->FetchTask ( iTask ) );
	ASSERT_EQ ( iTask, 6 );
	iTask = -1;
	ASSERT_TRUE ( pSrcA->FetchTask ( iTask ) );
	ASSERT_EQ ( iTask, 7 );
	// first source finally exhausted too
	iTask = -1;
	ASSERT_FALSE ( pSrcA->FetchTask ( iTask ) );
}
// One parse case: a dispatcher template string plus the expected
// concurrency/batch pair (0 = "unset or unparsable part").
struct CheckDispatch { const char* szTemplate; int iConc; int iBatch; };
// "<concurrency>/<batch>" template cases; judging by the expectations below,
// '*' and '0' both mean "default" (reported as 0), either side of the '/'
// may be omitted, and whitespace around the numbers is ignored
// — TODO confirm against Dispatcher::ParseTemplate.
static CheckDispatch dChecks[] = {
	{ nullptr, 0, 0 },
	{ "", 0, 0 },
	{ " ", 0, 0 },
	{ "/", 0, 0 },
	{ "0", 0, 0 },
	{ "*", 0, 0 },
	{ "0/", 0, 0 },
	{ "*/", 0, 0 },
	{ "/0", 0, 0 },
	{ "/*", 0, 0 },
	{ "*/*", 0, 0 },
	{ "*/0", 0, 0 },
	{ "0/0", 0, 0 },
	{ "0/*", 0, 0 },
	{ "13", 13, 0 },
	{ "13/", 13, 0 },
	{ "13/*", 13, 0 },
	{ "13/0", 13, 0 },
	{ "/3", 0, 3 },
	{ "0/3", 0, 3 },
	{ "*/3", 0, 3 },
	{ "13/3", 13, 3 },
	{ " 13/3", 13, 3 },
	{ "13 /3", 13, 3 },
	{ "13/ 3", 13, 3 },
	{ "13/3 ", 13, 3 },
	{ " 13 /3", 13, 3 },
	{ " 13/ 3", 13, 3 },
	{ " 13/3 ", 13, 3 },
	{ " 13 / 3", 13, 3 },
	{ " 13 /3 ", 13, 3 },
	{ " 13 / 3 ", 13, 3 },
};
// Parse a single dispatcher template and verify both parsed fields;
// the template itself is echoed on failure for easy diagnostics.
void Check ( const char* szTemplate, int iConc, int iBatch )
{
	const auto tParsed = Dispatcher::ParseTemplate ( szTemplate );
	ASSERT_EQ ( tParsed.concurrency, iConc ) << szTemplate;
	ASSERT_EQ ( tParsed.batch, iBatch ) << szTemplate;
}
// Run every single-template case from the shared table through Check().
TEST ( Dispatcher, ParseOne )
{
	for ( const auto& tCase : dChecks )
		Check ( tCase.szTemplate, tCase.iConc, tCase.iBatch );
}
// Parse a pair of dispatcher templates ("first+second") and verify
// concurrency/batch of both halves; the template is echoed on failure.
void CheckTwo ( const char* szTemplate, int iConcx, int iBatchx, int iConcy, int iBatchy )
{
	const auto tPair = Dispatcher::ParseTemplates ( szTemplate );
	ASSERT_EQ ( tPair.first.concurrency, iConcx ) << szTemplate;
	ASSERT_EQ ( tPair.first.batch, iBatchx ) << szTemplate;
	ASSERT_EQ ( tPair.second.concurrency, iConcy ) << szTemplate;
	ASSERT_EQ ( tPair.second.batch, iBatchy ) << szTemplate;
}
// Couple-template parsing: every combination of two single-template cases
// joined by '+' must parse into exactly the two expected halves.
TEST ( Dispatcher, ParseCouple )
{
	CheckTwo ( nullptr, 0, 0, 0, 0 );
	CheckTwo ( "", 0, 0, 0, 0 );
	for ( const auto& tLeft : dChecks )
		for ( const auto& tRight : dChecks )
		{
			StringBuilder_c sJoined;
			sJoined << tLeft.szTemplate << '+' << tRight.szTemplate;
			CheckTwo ( sJoined.cstr(), tLeft.iConc, tLeft.iBatch, tRight.iConc, tRight.iBatch );
		}
}
| 19,966
|
C++
|
.cpp
| 621
| 29.251208
| 147
| 0.626416
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,999
|
gtests_text.cpp
|
manticoresoftware_manticoresearch/src/gtests/gtests_text.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include <gtest/gtest.h>
#include "sphinxint.h"
#include "fileutils.h"
#include "sphinxutils.h"
#include "sphinxstem.h"
#include "stripper/html_stripper.h"
#include <cmath>
#include <memory>
// Miscelaneous tests mostly processing texts with many test cases: HTML Stripper, levenstein,
// wildcards, expr parser,
// Table-driven check of CSphHTMLStripper: each row is stripped with the given
// indexed-attribute and removed-element settings and compared with the
// expected plain text. Covers malformed markup, entities, UTF-8, comments,
// PIs, attribute indexing and unclosed/broken tags.
TEST( Text, HTMLStriper )
{
	const char * sTests[][4] =
	{
		// source-data, index-attrs, remove-elements, expected-results
		{ "<?xml broken piece of shit/>should be indexed", "", "", "should be indexed" },
		{ "<?xml crazily=\"broken>shit\">still should be indexed", "", "", "shit\">still should be indexed" },
		{ "MOVING? HATE TO PACK ?HIRE A TRUCK WE\'LL DO THE REST! CLICK HERE<?<?<?", "", "", "MOVING? HATE TO PACK ?HIRE A TRUCK WE'LL DO THE REST! CLICK HERE" },
		{ "<html>trivial test</html>", "", "", " trivial test " },
		{ "<html>lets <img src=\"g/smth.jpg\" alt=\"nice picture\">index attrs</html>", "img=alt", "", " lets nice picture index attrs " },
		{ "<html> lets also<script> whatever here; a<b</script>remove scripts", "", "script, style", " lets also remove scripts" },
		{ "testing in<b><font color='red'>line</font> ele<em>men</em>ts", "", "", "testing inline elements" },
		{ "testing non<p>inline</h1>elements", "", "", "testing non inline elements" },
		{ "testing entities&stuff", "", "", "testing entities&stuff" },
		{ "testing АБВ utf encoding", "", "", "testing \xD0\x90\xD0\x91\xD0\x92 utf encoding" },
		{ "testing <1 <\" <\x80 <\xe0 <\xff </3 malformed tags", "", "", "testing <1 <\" <\x80 <\xe0 <\xff </3 malformed tags" },
		{ "testing comm<!--comm-->ents", "", "", "testing comments" },
		{ "< > ϑ &somethingverylong; &the", "", "", "< > \xCF\x91 &somethingverylong; &the" },
		{ "testing <img src=\"g/smth.jpg\" alt=\"nice picture\" rel=anotherattr junk=throwaway>inline tags vs attr indexing", "img=alt,rel", "", "testing nice picture anotherattr inline tags vs attr indexing" },
		{ "this <?php $code = \"must be stripped\"; ?> away", "", "", "this away" },
		{ "<a href=\"http://www.com\">content1</a>", "a=title", "", "content1" },
		{ "<a href=\"http://www.com\" title=\"my test title\">content2</a>", "a=title", "", "my test title content2" },
		{ "testing <img src=\"g/smth.jpg\" alt=\"nice picture\" rel=anotherattr junk=\"throwaway\">inline tags vs attr indexing", "img=alt,rel", "", "testing nice picture anotherattr inline tags vs attr indexing" },
		{ "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\"><html>test</html>", "", "", " test " },
		{ "<!smth \"that>can<break\"><html>test</html>", "", "", " test " },
		{ "<TABLE CLASS=\"MSONORMALTABLE\" STYLE=\"BORDER-COLLAPSE: COLLAPSE; MARGIN-LEFT: ID=\"TABLE76\"><TR><TD>ohai</TD></TR></TABLE>", "", "", " ohai " },
		{ "ohai2<table class", "", "", "ohai2 " },
		{ "ohai<table class>3", "", "", "ohai 3" },
		{ "ohai<table class >4", "", "", "ohai 4" },
		{ "ohai<table class =>5", "", "", "ohai 5" },
		{ "ohai<table class =\"smth><tr><td>6</td></tr></table> some more content", "", "", "ohai 6 some more content" },
		{ "ohai<table nowrap class=\"a>b\">7", "", "", "ohai 7" },
		{ "ohai<table nowrap class =\"a>b\">8", "", "", "ohai 8" },
		{ "ohai<table nowrap class= \"a>b\">9", "", "", "ohai 9" },
		{ "ohai<table now rap class=\"a>b\">10", "", "", "ohai 10" },
		{ "ohai<table class = \"smth><tr><td>6</td><td class=\"test\">11</td></tr></table> gimme more", "", "", "ohai 11 gimme more" },
		{ "<P ALIGN=\"LEFT STYLE=\"MARGIN:0IN 0IN .0001PT;TEXT-ALIGN:LEFT;\"><B><FONT SIZE=\"2\" FACE=\"TIMES NEW ROMAN\" STYLE=\"FONT-SIZE:10.0PT;FONT-WEIGHT:BOLD;\">Commission File Number: 333-155507", "", "", " Commission File Number: 333-155507" },
		{ "<TD NOWRAP ALIGN=RIGHT STYLE=\"BORDER-BOTTOM: #000000 1PX SOLID; BORDER-TOP: #000000 1PX SOLID;\"\"><B>SGX", "", "", " SGX" },
		{ "tango & cash", "", "", "tango & cash" },
		{ "<font CLASS=\"MSONORMALTABLE\" STYLE=\"BORDER-COLLAPSE: COLLAPSE; MARGIN-LEFT: ID=\"TABLE76\">ahoy\"mate", "font=zzz", "", "ahoy\"mate" },
		{ "ahoy<font class =>2", "font=zzz", "", "ahoy2" },
		{ "ahoy<font class =\"smth><b>3</b></font>there", "font=zzz", "", "ahoy3there" },
		{ "ahoy<font nowrap class=\"a>b\">4", "font=zzz", "", "ahoy4" },
		{ "ahoy<font now rap class=\"a>b\">5", "font=zzz", "", "ahoy5" },
		{ "ahoy<font class = \"smth><b><i>6</i><b class=\"test\">seven</b></i></font>eight", "font=zzz", "", "ahoyseveneight" },
		{ "testing À № Ė1 numbers utf encoding", "", "", "testing \xC3\x80 \xE2\x84\x96 \xC4\x96\x31 numbers utf encoding" }
	};

	int nTests = sizeof ( sTests ) / sizeof ( sTests[0] );
	for ( auto iTest = 0; iTest<nTests; ++iTest )
	{
		CSphString sError;
		// a fresh stripper per case: settings must not leak between rows
		CSphHTMLStripper tStripper ( true );
		ASSERT_TRUE ( tStripper.SetIndexedAttrs ( sTests[iTest][1], sError ) );
		ASSERT_TRUE ( tStripper.SetRemovedElements ( sTests[iTest][2], sError ) );

		// Strip() works in-place on a mutable copy of the source
		CSphString sBuf ( sTests[iTest][0] );
		tStripper.Strip ( ( BYTE * ) sBuf.cstr () );
		ASSERT_STREQ ( sBuf.cstr (), sTests[iTest][3] ) << "test " << 1+iTest << "/" << nTests;
	}
}
//////////////////////////////////////////////////////////////////////////
static int ProxyLevenshtein ( const char * sA, const char * sB )
{
auto iLenA = (int) strlen ( sA );
auto iLenB = (int) strlen ( sB );
CSphVector<int> dTmp;
return sphLevenshtein ( sA, iLenA, sB, iLenB, dTmp );
}
// sphLevenshtein sanity checks: classic edit-distance cases where each
// insertion, deletion and substitution costs 1.
TEST ( Text, Levenshtein )
{
	ASSERT_EQ ( ProxyLevenshtein ( "a", "b" ), 1 );
	ASSERT_EQ ( ProxyLevenshtein ( "ab", "ac" ), 1 );
	ASSERT_EQ ( ProxyLevenshtein ( "ac", "bc" ), 1 );
	ASSERT_EQ ( ProxyLevenshtein ( "abc", "axc" ), 1 );
	ASSERT_EQ ( ProxyLevenshtein ( "kitten", "sitting" ), 3 );
	ASSERT_EQ ( ProxyLevenshtein ( "xabxcdxxefxgx", "1ab2cd34ef5g6" ), 6 );
	ASSERT_EQ ( ProxyLevenshtein ( "cat", "cow" ), 2 );
	ASSERT_EQ ( ProxyLevenshtein ( "xabxcdxxefxgx", "abcdefg" ), 6 );
	ASSERT_EQ ( ProxyLevenshtein ( "javawasneat", "scalaisgreat" ), 7 );
	ASSERT_EQ ( ProxyLevenshtein ( "example", "samples" ), 3 );
	ASSERT_EQ ( ProxyLevenshtein ( "sturgeon", "urgently" ), 6 );
	ASSERT_EQ ( ProxyLevenshtein ( "levenshtein", "frankenstein" ), 6 );
	ASSERT_EQ ( ProxyLevenshtein ( "distance", "difference" ), 5 );
	ASSERT_EQ ( ProxyLevenshtein ( "abc", "xyz" ), 3 );
	ASSERT_EQ ( ProxyLevenshtein ( "abc", "a" ), 2 );
	ASSERT_EQ ( ProxyLevenshtein ( "a", "abc" ), 2 );
	ASSERT_EQ ( ProxyLevenshtein ( "abc", "c" ), 2 );
	ASSERT_EQ ( ProxyLevenshtein ( "c", "abc" ), 2 );
	ASSERT_EQ ( ProxyLevenshtein ( "cake", "drake" ), 2 );
	ASSERT_EQ ( ProxyLevenshtein ( "drake", "cake" ), 2 );
	ASSERT_EQ ( ProxyLevenshtein ( "saturday", "sunday" ), 3 );
	ASSERT_EQ ( ProxyLevenshtein ( "sunday", "saturday" ), 3 );
	ASSERT_EQ ( ProxyLevenshtein ( "book", "back" ), 2 );
	ASSERT_EQ ( ProxyLevenshtein ( "dog", "fog" ), 1 );
	ASSERT_EQ ( ProxyLevenshtein ( "foq", "fog" ), 1 );
	ASSERT_EQ ( ProxyLevenshtein ( "fvg", "fog" ), 1 );
	ASSERT_EQ ( ProxyLevenshtein ( "encyclopedia", "encyclopediaz" ), 1 );
	ASSERT_EQ ( ProxyLevenshtein ( "encyclopediz", "encyclopediaz" ), 1 );
	ASSERT_EQ ( ProxyLevenshtein ( "chukumwong", "ckwong" ), 4 );
	ASSERT_EQ ( ProxyLevenshtein ( "ckwong", "chukumwong" ), 4 );
	ASSERT_EQ ( ProxyLevenshtein ( "folden", "older" ), 2 );
	ASSERT_EQ ( ProxyLevenshtein ( "folden", "melden" ), 2 );
	ASSERT_EQ ( ProxyLevenshtein ( "folden", "scolded" ), 3 );
	ASSERT_EQ ( ProxyLevenshtein ( "goldin", "holding" ), 2 );
	ASSERT_EQ ( ProxyLevenshtein ( "goldin", "soldier" ), 3 );
	ASSERT_EQ ( ProxyLevenshtein ( "helden", "hielden" ), 1 );
	ASSERT_EQ ( ProxyLevenshtein ( "helden", "sheldon" ), 2 );
	ASSERT_EQ ( ProxyLevenshtein ( "helena", "helens" ), 1 );
	ASSERT_EQ ( ProxyLevenshtein ( "helena", "helllena" ), 2 );
	ASSERT_EQ ( ProxyLevenshtein ( "helga", "belgrave" ), 4 );
	ASSERT_EQ ( ProxyLevenshtein ( "helga", "anhel" ), 4 );
}
// Reminder: sizeof on a string literal counts the terminating NUL,
// hence "text" occupies 5 bytes, not 4.
TEST ( sizeof_literal, text_5 )
{
	ASSERT_EQ ( sizeof ("text"), 5);
}
//////////////////////////////////////////////////////////////////////////
// Basic sphWildcardMatch behavior: '?' matches exactly one character,
// '*' matches any run (including empty), '%' matches zero or one character,
// and a backslash-escaped star matches a literal '*'.
TEST ( Wildcards, simple )
{
	ASSERT_TRUE ( sphWildcardMatch ( "abc", "abc" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "abc", "?bc" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "abc", "a?c" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "abc", "ab?" ) );
	ASSERT_FALSE ( sphWildcardMatch ( "abc", "?ab" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "abac", "a*c" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "abac", "a*?c" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "abac", "a*??c" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "abac", "a?*?c" ) );
	ASSERT_FALSE ( sphWildcardMatch ( "abac", "a*???c" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "abac", "a?a?" ) );
	ASSERT_FALSE ( sphWildcardMatch ( "abac", "a?a??" ) );
	ASSERT_FALSE ( sphWildcardMatch ( "abac", "a??a" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "abracadabra", "a*" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "abracadabra", "a*a" ) );
	ASSERT_FALSE ( sphWildcardMatch ( "abracadabra", "a*c" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "abracadabra", "?b*r?" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "abracadabra", "?b*r*" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "abracadabra", "?b*r*r*" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "abracadabra", "*a*a*a*" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "abracadabra", "*a*a*a*a*a*" ) );
	ASSERT_FALSE ( sphWildcardMatch ( "a", "a*a?" ) );
	ASSERT_FALSE ( sphWildcardMatch ( "abracadabra", "*a*a*a*a*a?" ) );

	// '%' matches zero or one arbitrary character
	ASSERT_TRUE ( sphWildcardMatch ( "car", "car%" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "cars", "car%" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "card", "car%" ) );
	ASSERT_FALSE ( sphWildcardMatch ( "carded", "car%" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "abc", "abc%" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "abcd", "abc%" ) );
	ASSERT_FALSE ( sphWildcardMatch ( "abcde", "abc%" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "ab", "a%b" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "acb", "a%b" ) );
	ASSERT_FALSE ( sphWildcardMatch ( "acdb", "a%b" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "abc", "a%bc" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "abbc", "a%bc" ) );
	ASSERT_FALSE ( sphWildcardMatch ( "abbbc", "a%bc" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "ab", "a%%b" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "axb", "a%%b" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "axyb", "a%%b" ) );
	ASSERT_FALSE ( sphWildcardMatch ( "axyzb", "a%%b" ) );

	// '\*' matches only a literal star
	ASSERT_TRUE ( sphWildcardMatch ( "a*b", "a?b" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "a*b", "a*b" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "a*b", "a\\*b" ) );
	ASSERT_FALSE ( sphWildcardMatch ( "acb", "a\\*b" ) );
	ASSERT_FALSE ( sphWildcardMatch ( "acdeb", "a\\*b" ) );
}
// Patterns with many '-*-' repetitions that force heavy backtracking in a
// naive recursive matcher; the matcher must still answer correctly (and in
// reasonable time).
TEST ( Wildcards, recursive_slow )
{
	// new cases recursive slow cases
	ASSERT_FALSE ( sphWildcardMatch ( "-----this-li", "-*-*-*-" ) );
	ASSERT_FALSE (
		sphWildcardMatch ( "---------------------------------this-li", "-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-" ) );
	ASSERT_TRUE (
		sphWildcardMatch ( "---------------------------------this-li-", "-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-" ) );
	ASSERT_TRUE (
		sphWildcardMatch ( "---------------------------------this-li", "-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*i" ) );
	ASSERT_TRUE (
		sphWildcardMatch ( "---------------------------------this-li", "-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*" ) );
	ASSERT_TRUE (
		sphWildcardMatch ( "---------------------------------this-li", "-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*s-*i" ) );
	ASSERT_FALSE (
		sphWildcardMatch ( "---------------------------------this-li", "-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-s-*i" ) );
	ASSERT_FALSE (
		sphWildcardMatch ( "---------------------------------this-li", "-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*x-*i" ) );
	ASSERT_FALSE (
		sphWildcardMatch ( "--------------------------this-li--p---", "-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-z-*-" ) );
}
// Patterns against haystacks with repeating character sequences, where a
// greedy '*' must sometimes retry from a later anchor to find the match.
TEST ( Wildcards, repeating_character_sequences )
{
	// cases with repeating character sequences
	ASSERT_TRUE ( sphWildcardMatch ( "abcccd", "*ccd" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "mississipissippi", "*issip*ss*" ) );
	ASSERT_FALSE ( sphWildcardMatch ( "xxxx*zzzzzzzzy*f", "xxxx*zzy*fffff" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "xxxx*zzzzzzzzy*f", "xxx*zzy*f" ) );
	ASSERT_FALSE ( sphWildcardMatch ( "xxxxzzzzzzzzyf", "xxxx*zzy*fffff" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "xxxxzzzzzzzzyf", "xxxx*zzy*f" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "xyxyxyzyxyz", "xy*z*xyz" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "mississippi", "*sip*" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "xyxyxyxyz", "xy*xyz" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "mississippi", "mi*sip*" ) );
	// NOTE: this check used to be duplicated verbatim; the copy was removed
	ASSERT_TRUE ( sphWildcardMatch ( "ababac", "*abac*" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "aaazz", "a*zz*" ) );
	ASSERT_FALSE ( sphWildcardMatch ( "a12b12", "*12*23" ) );
	ASSERT_FALSE ( sphWildcardMatch ( "a12b12", "a12b" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "a12b12", "*12*12*" ) );
}
// Wildcard characters appearing inside the haystack itself ("tame" string,
// in wildcard-matching terminology) must be treated as literal characters.
TEST ( Wildcards, in_same_string )
{
	// wildcard in the tame string
	ASSERT_TRUE ( sphWildcardMatch ( "*", "*" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "a*abab", "a*b" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "a*r", "a*" ) );
	ASSERT_FALSE ( sphWildcardMatch ( "a*ar", "a*aar" ) );
}
// Multiple-'*' patterns over mixed-case haystacks; matching is case-sensitive,
// so patterns that differ only in case from the haystack must fail.
TEST ( Wildcards, Double )
{
	// double wildcard
	ASSERT_TRUE ( sphWildcardMatch ( "XYXYXYZYXYz", "XY*Z*XYz" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "missisSIPpi", "*SIP*" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "mississipPI", "*issip*PI" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "xyxyxyxyz", "xy*xyz" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "miSsissippi", "mi*sip*" ) );
	ASSERT_FALSE ( sphWildcardMatch ( "miSsissippi", "mi*Sip*" ) );
	// NOTE: this check used to be duplicated verbatim; the copy was removed
	ASSERT_TRUE ( sphWildcardMatch ( "abAbac", "*Abac*" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "aAazz", "a*zz*" ) );
	ASSERT_FALSE ( sphWildcardMatch ( "A12b12", "*12*23" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "a12B12", "*12*12*" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "oWn", "*oWn*" ) );
}
// Mixed "*?" pattern: requires at least one character after any run,
// so it matches any non-empty string here.
TEST ( Wildcards, mixed )
{
	// mixed wildcard
	ASSERT_TRUE ( sphWildcardMatch ( "a", "*?" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "ab", "*?" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "abc", "*?" ) );
}
// Guard against false positives in '*'/'?' combinations, including one
// documented quirk of the current implementation (see the note below).
TEST ( Wildcards, false_positives )
{
	// wildcard false positives
	ASSERT_FALSE ( sphWildcardMatch ( "a", "??" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "ab", "?*?" ) );
	// due to loop just right after case '*'
	// skip all the extra stars and question marks
	// this case has opposite result
	ASSERT_FALSE ( sphWildcardMatch ( "ab", "*?*?*" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "abc", "?**?*?" ) );
	ASSERT_FALSE ( sphWildcardMatch ( "abc", "?**?*&?" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "abcd", "?b*??" ) );
	ASSERT_FALSE ( sphWildcardMatch ( "abcd", "?a*??" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "abcd", "?**?c?" ) );
	ASSERT_FALSE ( sphWildcardMatch ( "abcd", "?**?d?" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "abcde", "?*b*?*d*?" ) );
}
// '?' must consume exactly one character, matching case-sensitively
// around it.
TEST ( Wildcards, single_char_match )
{
	// single char match
	ASSERT_TRUE ( sphWildcardMatch ( "bLah", "bL?h" ) );
	ASSERT_FALSE ( sphWildcardMatch ( "bLaaa", "bLa?" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "bLah", "bLa?" ) );
	ASSERT_FALSE ( sphWildcardMatch ( "bLaH", "?Lah" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "bLaH", "?LaH" ) );
}
// Stress cases with very long haystacks and patterns containing many '*'s -
// the classic pathological inputs for backtracking wildcard matchers.
TEST ( Wildcards, many )
{
	// many wildcard
	ASSERT_TRUE (
		sphWildcardMatch ( "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab"
						   , "a*a*a*a*a*a*aa*aaa*a*a*b" ) );
	ASSERT_TRUE ( sphWildcardMatch (
		"abababababababababababababababababababaacacacacacacacadaeafagahaiajakalaaaaaaaaaaaaaaaaaffafagaagggagaaaaaaaab"
		, "*a*b*ba*ca*a*aa*aaa*fa*ga*b*" ) );
	ASSERT_FALSE ( sphWildcardMatch (
		"abababababababababababababababababababaacacacacacacacadaeafagahaiajakalaaaaaaaaaaaaaaaaaffafagaagggagaaaaaaaab"
		, "*a*b*ba*ca*a*x*aaa*fa*ga*b*" ) );
	ASSERT_FALSE ( sphWildcardMatch (
		"abababababababababababababababababababaacacacacacacacadaeafagahaiajakalaaaaaaaaaaaaaaaaaffafagaagggagaaaaaaaab"
		, "*a*b*ba*ca*aaaa*fa*ga*gggg*b*" ) );
	ASSERT_TRUE ( sphWildcardMatch (
		"abababababababababababababababababababaacacacacacacacadaeafagahaiajakalaaaaaaaaaaaaaaaaaffafagaagggagaaaaaaaab"
		, "*a*b*ba*ca*aaaa*fa*ga*ggg*b*" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "aaabbaabbaab", "*aabbaa*a*" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*", "a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "aaaaaaaaaaaaaaaaa", "*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*" ) );
	ASSERT_FALSE ( sphWildcardMatch ( "aaaaaaaaaaaaaaaa", "*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*" ) );
	ASSERT_FALSE ( sphWildcardMatch (
		"abc*abcd*abcde*abcdef*abcdefg*abcdefgh*abcdefghi*abcdefghij*abcdefghijk*abcdefghijkl*abcdefghijklm*abcdefghijklmn"
		, "abc*abc*abc*abc*abc*abc*abc*abc*abc*abc*abc*abc*abc*abc*abc*abc*abc*" ) );
	ASSERT_TRUE ( sphWildcardMatch (
		"abc*abcd*abcde*abcdef*abcdefg*abcdefgh*abcdefghi*abcdefghij*abcdefghijk*abcdefghijkl*abcdefghijklm*abcdefghijklmn"
		, "abc*abc*abc*abc*abc*abc*abc*abc*abc*abc*abc*abc*" ) );
	ASSERT_FALSE ( sphWildcardMatch ( "abc*abcd*abcd*abc*abcd", "abc*abc*abc*abc*abc" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "abc*abcd*abcd*abc*abcd*abcd*abc*abcd*abc*abc*abcd"
									 , "abc*abc*abc*abc*abc*abc*abc*abc*abc*abc*abcd" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "abc", "********a********b********c********" ) );
	ASSERT_FALSE ( sphWildcardMatch ( "********a********b********c********", "abc" ) );
	ASSERT_FALSE ( sphWildcardMatch ( "abc", "********a********b********b********" ) );
	ASSERT_TRUE ( sphWildcardMatch ( "*abc*", "***a*b*c***" ) );
}
//////////////////////////////////////////////////////////////////////////
// End-to-end check of the expression parser/evaluator: build a small schema
// (bigint id plus integers aaa, bbb, ccc), fill one attribute row with
// id=123, aaa=1, bbb=2, ccc=3, then parse a table of expressions and compare
// each evaluated result against a precomputed float.
TEST ( Text, expression_parser )
{
	CSphColumnInfo tCol;
	CSphSchema tSchema;
	tCol.m_sName = "id";
	tCol.m_eAttrType = SPH_ATTR_BIGINT;
	tSchema.AddAttr ( tCol, false );
	tCol.m_sName = "aaa";
	tCol.m_eAttrType = SPH_ATTR_INTEGER;
	tSchema.AddAttr ( tCol, false );
	tCol.m_sName = "bbb";
	tCol.m_eAttrType = SPH_ATTR_INTEGER;
	tSchema.AddAttr ( tCol, false );
	tCol.m_sName = "ccc";
	tCol.m_eAttrType = SPH_ATTR_INTEGER;
	tSchema.AddAttr ( tCol, false );

	// unique_ptr ownership: the raw new[]/SafeDeleteArray version leaked the
	// row whenever an ASSERT_* below returned from the test early
	std::unique_ptr<CSphRowitem[]> pRow { new CSphRowitem[tSchema.GetRowSize ()] };
	for ( int i = 1; i<tSchema.GetAttrsCount(); i++ )
		sphSetRowAttr ( pRow.get(), tSchema.GetAttr(i).m_tLocator, i ); // aaa=1, bbb=2, ccc=3
	sphSetRowAttr ( pRow.get(), tSchema.GetAttr(0).m_tLocator, 123 ); // id=123

	CSphMatch tMatch;
	tMatch.m_tRowID = 123;
	tMatch.m_iWeight = 456; // visible to expressions as @weight
	tMatch.m_pStatic = pRow.get();

	// one test case: the expression text and its expected float value
	struct ExprTest_t
	{
		const char * m_sExpr;
		float m_fValue;
	};

	ExprTest_t dTests[] =
	{
		{ "ccc/2"						, 1.5f }
		, { "1*2*3*4*5*6*7*8*9*10"		, 3628800.0f }
		, { "aaa+bbb*sin(0)*ccc"		, 1.0f }
		, { "if(pow(sqrt(2),2)=2,123,456)", 123.0f }
		, { "if(2<2,3,4)"				, 4.0f }
		, { "if(2>=2,3,4)"				, 3.0f }
		, { "pow(7,5)"					, 16807.f }
		, { "sqrt(3)"					, 1.7320508f }
		, { "log2((2+2)*(2+2))"			, 4.0f }
		, { "min(3,15)"					, 3.0f }
		, { "max(3,15)"					, 15.0f }
		, { "if(3<15,bbb,ccc)"			, 2.0f }
		, { "id+@weight"				, 579.0f }
		, { "abs(-3-ccc)"				, 6.0f }
		, { "(aaa+bbb)*(ccc-aaa)"		, 6.0f }
		, { "(((aaa)))"					, 1.0f }
		, { "aaa-bbb*ccc"				, -5.0f }
		, { " aaa -\tbbb *\t\t\tccc "	, -5.0f }
		, { "bbb+123*aaa"				, 125.0f }
		, { "2.000*2e+1+2"				, 42.0f }
		, { "3<5"						, 1.0f }
		, { "1 + 2*3 > 4*4"				, 0.0f }
		, { "aaa/-bbb"					, -0.5f, }
		, { "-10*-10"					, 100.0f }
		, { "aaa+-bbb*-5"				, 11.0f }
		, { "-aaa>-bbb"					, 1.0f }
		, { "1-aaa+2-3+4"				, 3.0f }
		, { "bbb/1*2/6*3"				, 2.0f }
		, { "(aaa+bbb)/sqrt(3)/sqrt(3)"	, 1.0f }
		, { "aaa-bbb-2"					, -3.0f }
		, { "ccc/2*4/bbb"				, 3.0f }
		, { "(2+(aaa*bbb))+3"			, 7.0f }
	};

	for ( auto &dTest: dTests )
	{
		CSphString sError;
		ExprParseArgs_t tExprArgs;
		ISphExprRefPtr_c pExpr ( sphExprParse ( dTest.m_sExpr, tSchema, nullptr, sError, tExprArgs ) );
		ASSERT_TRUE ( pExpr.Ptr () ) << "parsing " << dTest.m_sExpr << ":" << sError.cstr ();
		ASSERT_FLOAT_EQ ( dTest.m_fValue, pExpr->Eval ( tMatch ) );
	}
}
TEST ( Text, expression_parser_many )
{
CSphColumnInfo tCol;
CSphSchema tSchema;
tCol.m_sName = "id";
tCol.m_eAttrType = SPH_ATTR_BIGINT;
tSchema.AddAttr ( tCol, false );
tCol.m_sName = "aaa";
tCol.m_eAttrType = SPH_ATTR_INTEGER;
tSchema.AddAttr ( tCol, false );
tCol.m_sName = "bbb";
tCol.m_eAttrType = SPH_ATTR_INTEGER;
tSchema.AddAttr ( tCol, false );
tCol.m_sName = "ccc";
tCol.m_eAttrType = SPH_ATTR_INTEGER;
tSchema.AddAttr ( tCol, false );
auto * pRow = new CSphRowitem[tSchema.GetRowSize ()];
for ( int i = 1; i<tSchema.GetAttrsCount(); i++ )
sphSetRowAttr ( pRow, tSchema.GetAttr(i).m_tLocator, i );
sphSetRowAttr ( pRow, tSchema.GetAttr(0).m_tLocator, 123 );
CSphMatch tMatch;
tMatch.m_tRowID = 123;
tMatch.m_iWeight = 456;
tMatch.m_pStatic = pRow;
const char* ppTests[] =
{
"ccc/2+aaa+bbb+rand()+ccc+id+12*1562 mod 40+aaa+bbb+rand()+ccc+id+12*1562 mod 40"
, "1*2*3*4*5*6*7*8*9*10+aaa+bbb+rand()+ccc+id+12*1562 mod 40+aaa+bbb+rand()+ccc+id+12*1562 mod 40"
, "aaa+bbb*sin(0)*ccc+aaa+bbb+rand()+ccc+id+12*1562 mod 40+aaa+bbb+rand()+ccc+id+12*1562 mod 40"
, "if(pow(sqrt(2),2)=2,123,456)+aaa+bbb+rand()+ccc+id+12*1562 mod 40+aaa+bbb+rand()+ccc+id+12*1562 mod 40"
, "if(2<2,3,4)+aaa+bbb+rand()+ccc+id+12*1562 mod 40+aaa+bbb+rand()+ccc+id+12*1562 mod 40+aaa+bbb+rand()+ccc+id+12*1562 mod 40"
, "if(2>=2,3,4)+aaa+bbb+rand()+ccc+id+12*1562 mod 40+aaa+bbb+rand()+ccc+id+12*1562 mod 40+aaa+bbb+rand()+ccc+id+12*1562 mod 40"
, "pow(7,5)+aaa+bbb+rand()+ccc+id+12*1562 mod 40+aaa+bbb+rand()+ccc+id+12*1562 mod 40+aaa+bbb+rand()+ccc+id+12*1562 mod 40"
, "sqrt(3)+aaa+bbb+rand()+ccc+id+12*1562 mod 40+aaa+bbb+rand()+ccc+id+12*1562 mod 40+aaa+bbb+rand()+ccc+id+12*1562 mod 40"
, "log2((2+2)*(2+2))+aaa+bbb+rand()+ccc+id+12*1562 mod 40+aaa+bbb+rand()+ccc+id+12*1562 mod 40+aaa+bbb+rand()+ccc+id+12*1562 mod 40"
, "min(3,15)+aaa+bbb+rand()+ccc+id+12*1562 mod 40+aaa+bbb+rand()+ccc+id+12*1562 mod 40+aaa+bbb+rand()+ccc+id+12*1562 mod 40"
, "max(3,15)+aaa+bbb+rand()+ccc+id+12*1562 mod 40+aaa+bbb+rand()+ccc+id+12*1562 mod 40+aaa+bbb+rand()+ccc+id+12*1562 mod 40"
, "if(3<15,bbb,ccc)+aaa+bbb+rand()+ccc+id+12*1562 mod 40+aaa+bbb+rand()+ccc+id+12*1562 mod 40+aaa+bbb+rand()+ccc+id+12*1562 mod 40"
, "id+@weight+if(3<15,bbb,ccc)+aaa+bbb+rand()+ccc+id+12*1562 mod 40+aaa+bbb+rand()+ccc+id+12*1562 mod 40+aaa"
, "abs(-3-ccc)+id+@weight+if(3<15,bbb,ccc)+aaa+bbb+rand()+ccc+id+12*1562 mod 40+aaa+bbb+rand()+ccc+id+12*1562 mod 40+aaa"
, "(aaa+bbb)*(ccc-aaa)+abs(-3-ccc)+id+@weight+if(3<15,bbb,ccc)+aaa+bbb+rand()+ccc+id+12*1562 mod 40+aaa+bbb+rand()"
, "(((aaa)))+(aaa+bbb)*(ccc-aaa)+abs(-3-ccc)+id+@weight+if(3<15,bbb,ccc)+aaa+bbb+rand()+ccc+id+12*1562 mod 40+aaa+bbb+rand()"
, "aaa-bbb*ccc+(((aaa)))+(aaa+bbb)*(ccc-aaa)+abs(-3-ccc)+id+@weight+if(3<15,bbb,ccc)+aaa+bbb+rand()+ccc+id+12*1562"
, " aaa -\tbbb *\t\t\tccc + aaa-bbb*ccc+(((aaa)))+(aaa+bbb)*(ccc-aaa)+abs(-3-ccc)+id+@weight+if(3<15,bbb,ccc)+aaa+bbb"
, "bbb+123*aaa+ aaa -\tbbb *\t\t\tccc + aaa-bbb*ccc+(((aaa)))+(aaa+bbb)*(ccc-aaa)+abs(-3-ccc)+id+@weight"
, "2.000*2e+1+2+bbb+123*aaa+ aaa -\tbbb *\t\t\tccc + aaa-bbb*ccc+(((aaa)))+(aaa+bbb)*(ccc-aaa)+abs(-3-ccc)+id+@weight"
, "3<5+2.000*2e+1+2+bbb+123*aaa+ aaa -\tbbb *\t\t\tccc + aaa-bbb*ccc+(((aaa)))+(aaa+bbb)*(ccc-aaa)+abs(-3-ccc)+id+@weight"
, "1 + 2*3 > 4*4+3<5+2.000*2e+1+2+bbb+123*aaa+ aaa -\tbbb *\t\t\tccc + aaa-bbb*ccc+(((aaa)))+(aaa+bbb)*(ccc-aaa)"
, "aaa/-bbb+1 + 2*3 > 4*4+3<5+2.000*2e+1+2+bbb+123*aaa+ aaa -\tbbb *\t\t\tccc + aaa-bbb*ccc+(((aaa)))+(aaa+bbb)*(ccc-aaa)"
, "-10*-10+aaa/-bbb+1 + 2*3 > 4*4+3<5+2.000*2e+1+2+bbb+123*aaa+ aaa -\tbbb *\t\t\tccc + aaa-bbb*ccc"
, "aaa+-bbb*-5+-10*-10+aaa/-bbb+1 + 2*3 > 4*4+3<5+2.000*2e+1+2+bbb+123*aaa+ aaa -\tbbb *\t\t\tccc + aaa-bbb*ccc"
, "-aaa>-bbb+aaa+-bbb*-5+-10*-10+aaa/-bbb+1 + 2*3 > 4*4+3<5+2.000*2e+1+2+bbb+123*aaa+ aaa -\tbbb *\t\t\tccc"
, "1-aaa+2-3+4-aaa>-bbb+aaa+-bbb*-5+-10*-10+aaa/-bbb+1 + 2*3 > 4*4+3<5+2.000*2e+1+2+bbb+123*aaa+ aaa -\tbbb *\t\t\tccc"
, "bbb/1*2/6*3+1-aaa+2-3+4-aaa>-bbb+aaa+-bbb*-5+-10*-10+aaa/-bbb+1 + 2*3 > 4*4+3<5+2.000*2e+1+2+bbb+123*aaa+ aaa "
, "(aaa+bbb)/sqrt(3)/sqrt(3)+bbb/1*2/6*3+1-aaa+2-3+4-aaa>-bbb+aaa+-bbb*-5+-10*-10+aaa/-bbb+1 + 2*3 > 4*4+3<5+2.000"
, "aaa-bbb-2+(aaa+bbb)/sqrt(3)/sqrt(3)+bbb/1*2/6*3+1-aaa+2-3+4-aaa>-bbb+aaa+-bbb*-5+-10*-10+aaa/-bbb+1 + 2*3"
, "ccc mod 10+aaa-bbb-2+(aaa+bbb)/sqrt(3)/sqrt(3)+bbb/1*2/6*3+1-aaa+2-3+4-aaa>-bbb+aaa+-bbb*-5+-10*-10+aaa/-bbb+1 + 2*3"
, "ccc/2*4/bbb+ccc mod 10+aaa-bbb-2+(aaa+bbb)/sqrt(3)/sqrt(3)+bbb/1*2/6*3+1-aaa+2-3+4-aaa>-bbb+aaa+-bbb*-5"
, "(2+(aaa*bbb))+3+ccc/2*4/bbb+ccc mod 10+aaa-bbb-2+(aaa+bbb)/sqrt(3)/sqrt(3)+bbb/1*2/6*3+1-aaa+2-3+4-aaa>-bbb+aaa+-bbb*-5"
, "aaa+bbb*(ccc)-1+(2+(aaa*bbb))+3+ccc/2*4/bbb+ccc mod 10+aaa-bbb-2+(aaa+bbb)/sqrt(3)/sqrt(3)+bbb/1*2/6*3+1"
, "aaa+bbb*ccc*2-3/4*5/6*bbb+aaa+bbb*(ccc)-1+(2+(aaa*bbb))+3+ccc/2*4/bbb+ccc mod 10+aaa-bbb-2+(aaa+bbb)/sqrt(3)/sqrt(3)"
, "sqrt(2)+aaa+bbb*ccc*2-3/4*5/6*bbb+aaa+bbb*(ccc)-1+(2+(aaa*bbb))+3+ccc/2*4/bbb+ccc mod 10+aaa-bbb-2"
, "rand()+sqrt(2)+aaa+bbb*ccc*2-3/4*5/6*bbb+aaa+bbb*(ccc)-1+(2+(aaa*bbb))+3+ccc/2*4/bbb+ccc mod 10+aaa-bbb-2"
, "rand()+sqrt(2)+aaa+bbb*ccc*2-3/4*5/6*bbb+aaa+bbb*(ccc)-1+(2+(aaa*bbb))+3+ccc/2*4/bbb+ccc mod 10+aaa-bbb-2"
, "FIBONACCI(4)+rand()+sqrt(2)+aaa+bbb*ccc*2-3/4*5/6*bbb+aaa+bbb*(ccc)-1+(2+(aaa*bbb))+3+ccc/2*4/bbb+ccc mod 10+aaa-bbb-2"
};
CSphVector<CSphString> dTests;
for ( const auto* szTest : ppTests )
dTests.Add ( szTest );
for ( const auto & sTest : dTests )
{
CSphString sError;
ExprParseArgs_t tExprArgs;
ISphExprRefPtr_c pExpr ( sphExprParse ( sTest.cstr (), tSchema, nullptr, sError, tExprArgs ) );
ASSERT_TRUE ( pExpr.Ptr () ) << sError.cstr () << ": " << sTest.cstr();
}
SafeDeleteArray ( pRow );
}
//////////////////////////////////////////////////////////////////////////
TEST ( Text, ArabicStemmer )
{
// a few words, cross-verified using NLTK implementation
const char * dTests[] =
{
"\xd8\xb0\xd9\x87\xd8\xa8\xd8\xaa\0", "\xd8\xb0\xd9\x87\xd8\xa8\0",
"\xd8\xa7\xd9\x84\xd8\xb7\xd8\xa7\xd9\x84\xd8\xa8\xd8\xa9\0", "\xd8\xb7\xd9\x84\xd8\xa8\0",
"\xd8\xa7\xd9\x84\xd8\xb5\xd8\xba\xd9\x8a\xd8\xb1\xd8\xa9\0", "\xd8\xb5\xd8\xba\xd8\xb1\0",
"\xd8\xa7\xd9\x84\xd9\x89\0", "\xd8\xa7\xd9\x84\xd9\x89\0",
"\xd8\xa7\xd9\x84\xd9\x85\xd8\xaf\xd8\xb1\xd8\xb3\xd8\xa9\0", "\xd8\xaf\xd8\xb1\xd8\xb3\0",
"\xd9\x88\xd8\xaf\xd8\xb1\xd8\xb3\xd8\xaa\0", "\xd8\xaf\xd8\xb1\xd8\xb3\0",
"\xd8\xa7\xd9\x84\xd8\xaf\xd8\xb1\xd9\x88\xd8\xb3\0", "\xd8\xaf\xd8\xb1\xd8\xb3\0",
"\xd8\xac\xd9\x85\xd9\x8a\xd8\xb9\xd9\x87\xd8\xa7\0", "\xd8\xac\xd9\x85\xd8\xb9\0",
"\xd9\x88\xd8\xad\xd9\x8a\xd9\x86\0", "\xd9\x88\xd8\xad\xd9\x86\0",
// "\xd8\xac\xd8\xa7\xd8\xa1\0", "\xd8\xac\xd8\xa7\xd8\xa1\0",
"\xd9\x88\xd9\x82\xd8\xaa\0", "\xd9\x88\xd9\x82\xd8\xaa\0",
// "\xd8\xa7\xd9\x84\xd8\xa7\xd8\xae\xd8\xaa\xd8\xa8\xd8\xa7\xd8\xb1\0", "\xd8\xae\xd8\xa8\xd8\xb1\0",
"\xd9\x86\xd8\xac\xd8\xad\xd8\xaa\0", "\xd9\x86\xd8\xac\xd8\xad\0",
"\xd8\xb7\xd8\xa7\xd9\x84\xd8\xa8\xd8\xaa\xd9\x86\xd8\xa7\0", "\xd8\xb7\xd9\x84\xd8\xa8\0",
"\xd8\xa8\xd8\xa7\xd9\x85\xd8\xaa\xd9\x8a\xd8\xa7\xd8\xb2\0", "\xd9\x85\xd9\x8a\xd8\xb2\0",
"\xd8\xa7\xd9\x84\xd9\x85\xd8\xaf\xd8\xa7\xd8\xb1\xd8\xb3\0", "\xd8\xaf\xd8\xb1\xd8\xb3\0",
"\xd9\x84\xd9\x87\xd8\xa7\0", "\xd9\x84\xd9\x87\xd8\xa7\0",
"\xd8\xaf\xd9\x88\xd8\xb1\0", "\xd8\xaf\xd9\x88\xd8\xb1\0",
"\xd9\x83\xd8\xa8\xd9\x8a\xd8\xb1\0", "\xd9\x83\xd8\xa8\xd8\xb1\0",
"\xd9\x81\xd9\x8a\0", "\xd9\x81\xd9\x8a\0",
"\xd8\xaa\xd8\xb9\xd9\x84\xd9\x8a\xd9\x85\0", "\xd8\xb9\xd9\x84\xd9\x85\0",
"\xd8\xa7\xd8\xa8\xd9\x86\xd8\xa7\xd9\x8a\xd9\x86\xd8\xa7\0", "\xd8\xa8\xd9\x86\xd9\x8a\0",
// "\xd8\xa7\xd9\x84\xd8\xa7\xd8\xad\xd8\xa8\xd8\xa7\xd8\xa1\0", "\xd8\xad\xd8\xa8\xd8\xa1\0",
};
for ( int i = 0; i<int ( sizeof ( dTests ) / sizeof ( dTests[0] ) ); i += 2 )
{
char sBuf[64];
snprintf ( sBuf, sizeof ( sBuf ), "%s", dTests[i] );
stem_ar_utf8 ( ( BYTE * ) sBuf );
ASSERT_STREQ ( sBuf, dTests[i + 1] );
}
char sTest1[16] = "\xD9\x80\xD9\x80\xD9\x80\xD9\x80\0abcdef";
char sRef1[16] = "\0\0\0\0\0\0\0\0\0abcdef";
stem_ar_utf8 ( ( BYTE * ) sTest1 );
ASSERT_FALSE ( memcmp ( sTest1, sRef1, sizeof ( sTest1 ) ) );
char sTest2[] = "\xd8\xa7\xd9\x84\xd8\xb7\xd8\xa7\xd9\x84\xd8\xa8\xd8\xa9\0";
char sRef2[] = "\xd8\xb7\xd9\x84\xd8\xa8\0";
CSphTightVector<BYTE> dTest22;
dTest22.Resize ( sizeof ( sTest2 ) );
for ( int i = 0; i<10; i++ )
{
dTest22.Resize ( dTest22.GetLength () * 2 );
int iOff = dTest22.GetLength () - sizeof ( sTest2 );
memcpy ( dTest22.Begin () + iOff, sTest2, sizeof ( sTest2 ) );
stem_ar_utf8 ( dTest22.Begin () + iOff );
ASSERT_FALSE ( memcmp ( dTest22.Begin () + iOff, sRef2, sizeof ( sRef2 ) ) );
}
}
//////////////////////////////////////////////////////////////////////////
#include "indexing_sources/source_svpipe.h"
TEST ( Text, cvs_source )
{
int iWriteStride = 7;
const char * dTest[] = {
"1,\"a,b \"\" c\",\"d \"\"a\"\" c\",\"the\tdox\n fox\",tmp,tmp,tmp,11\n",
"a,b \" c", "d \"a\" c", "the\tdox\n fox", "tmp", "tmp", "tmp",
"2,\"abc, defghijk. \"Lmnopqrs, \"tuv,\"\" wxyz.\",...,tmp,tmp,tmp,11\n",
"abc, defghijk. Lmnopqrs", " tuv,\" wxyz.", "...", "tmp", "tmp", "tmp",
"3,\",\",\"\",tmp,tmp,tmp,tmp,11\n",
",", "", "tmp", "tmp", "tmp", "tmp",
"4,\"Sup, \"\"puper\"\", duper\",tmp,tmp,tmp,tmp,tmp,11\n",
"Sup, \"puper\", duper", "tmp", "tmp", "tmp", "tmp", "tmp",
"5,\"Sup, \"\"puper\"\" duper\",tmp,tmp,tmp,tmp,tmp,11\n",
"Sup, \"puper\" duper", "tmp", "tmp", "tmp", "tmp", "tmp",
"6,\"Sup, \"\"puper\"\"\","",tmp,tmp,tmp,tmp,11\n",
"Sup, \"puper\"", "", "tmp", "tmp", "tmp", "tmp",
"7,\"Sup, \"\"puper, duper\"\"\",,tmp,tmp,tmp,tmp,11\n",
"Sup, \"puper, duper\"", "", "tmp", "tmp", "tmp", "tmp",
"8,cool,so far,\"Sup\n extra, duper,\",tmp,tmp,tmp,11\n",
"cool", "so far", "Sup\n extra, duper,", "tmp", "tmp", "tmp",
"9,//\\\\match//\\\\,//\\\\double//\\\\,//\\\\escape//\\\\,tmp,tmp,tmp,11\n",
"//\\match//\\", "//\\double//\\", "//\\escape//\\", "tmp", "tmp", "tmp",
"10,ma\\\"tch,me,ten\\\"der,tmp,tmp,tmp,11\n",
"ma\"tch", "me", "ten\"der", "tmp", "tmp", "tmp",
"11,"
"test fest \\\" best"
",tmp,tmp,tmp,tmp,tmp,11\n",
"test fest \" best",
"tmp", "tmp", "tmp", "tmp", "tmp",
"12,"
"\"test fest \\\" be\"st\""
",tmp,tmp,tmp,tmp,tmp,11\n",
"test fest \\ be\"st\"",
"tmp", "tmp", "tmp", "tmp", "tmp",
"13,"
"\"test fest, be\"st of, the\""
",tmp,tmp,tmp,tmp,11\n",
"test fest, best of", " the\"",
"tmp", "tmp", "tmp", "tmp",
"14,"
"\"test fest, best of, th\"e"
",tmp,tmp,tmp,tmp,tmp,11\n",
"test fest, best of, the",
"tmp", "tmp", "tmp", "tmp", "tmp",
"15,"
"\"test fest\\, best of th\"e"
",tmp,tmp,tmp,tmp,tmp,11\n",
"test fest\\, best of the",
"tmp", "tmp", "tmp", "tmp", "tmp",
"16,"
"test \"fest\\, best of th\"e"
",tmp,tmp,tmp,tmp,11\n",
"test \"fest", " best of th\"e",
"tmp", "tmp", "tmp", "tmp",
NULL
};
const char * cvs_tmpfile = "__libsphinxtestcvs.tmp";
// write csv file
FILE * fp = fopen ( cvs_tmpfile, "wb" );
for ( int iTest = 0; dTest[iTest]!=NULL; iTest += iWriteStride )
fwrite ( dTest[iTest], 1, strlen ( dTest[iTest] ), fp );
fclose ( fp );
// open csv pipe
fp = fopen ( cvs_tmpfile, "rb" );
// make config for 6 fields and attribute
CSphConfigSection tConf;
ASSERT_TRUE ( tConf.Add ( CSphVariant ( "f0", 0 ), "csvpipe_field" ) );
CSphVariant &tTail = tConf["csvpipe_field"];
tTail.m_pNext = new CSphVariant ( "f1", 1 );
tTail.m_pNext->m_pNext = new CSphVariant ( "f2", 2 );
tTail.m_pNext->m_pNext->m_pNext = new CSphVariant ( "f3", 3 );
tTail.m_pNext->m_pNext->m_pNext->m_pNext = new CSphVariant ( "f4", 4 );
tTail.m_pNext->m_pNext->m_pNext->m_pNext->m_pNext = new CSphVariant ( "f5", 5 );
ASSERT_TRUE ( tConf.Add ( CSphVariant ( "gid", 6 ), "csvpipe_attr_uint" ) );
// setup source
CSphSource * pCSV = ( CSphSource * ) sphCreateSourceCSVpipe ( &tConf, fp, "csv" );
CSphString sError;
ASSERT_TRUE ( pCSV->Connect ( sError ) );
ASSERT_TRUE ( pCSV->IterateStart ( sError ) );
// verify that config matches to source schema
CSphSchema tSchema;
ASSERT_TRUE ( pCSV->UpdateSchema ( &tSchema, sError ) );
int iColumns = tSchema.GetFieldsCount();
// check parsed fields
for ( int iTest = 1;; )
{
bool bEOF = false;
BYTE ** pFields = pCSV->NextDocument ( bEOF, sError );
ASSERT_TRUE ( pFields || bEOF );
if ( bEOF )
break;
for ( int i = 0; i<iColumns; i++ )
{
CSphString sTmp ( ( const char * ) pFields[i] );
ASSERT_STREQ ( sTmp.cstr(), dTest[iTest + i] );
}
iTest += iWriteStride;
}
// clean up, fp will be closed automatically in CSphSource_BaseSV::Disconnect()
SafeDelete ( pCSV );
unlink ( cvs_tmpfile );
}
//////////////////////////////////////////////////////////////////////////
#if WITH_EXPAT
#include "source_xmlpipe2.h"
// Feeding xmlpipe2 a schema with an empty attribute name must make Connect()
// fail with a precise, position-annotated error message (checked verbatim).
TEST ( Text, xml_source_attr_error )
{
	const char * sTest =
R"raw(<?xml version="1.0" encoding="utf-8"?>
<sphinx:docset xmlns:sphinx="http://sphinxsearch.com/">
<sphinx:schema>
<sphinx:attr name="" type="int" />
<sphinx:field name="f" />
</sphinx:schema>
<sphinx:document id="1">
<>9</>
<f>hey</f>
</sphinx:document>
</sphinx:docset>
)raw";
	// expected error; line/pos point at the offending sphinx:attr element
	const char * sRes = "source 'xml': (null) is not a valid attribute name (line=4, pos=4, docid=0)";
	// prepare xml pipe: the document above is served from a temp file
	FILE * fp = tmpfile ();
	fwrite ( sTest, 1, strlen ( sTest ), fp );
	rewind ( fp );
	CSphString sError;
	// make config (empty; schema comes from the xml stream itself)
	CSphConfigSection tConf;
	// setup source
	auto * pSource = ( CSphSource * ) sphCreateSourceXmlpipe2 ( &tConf, fp, "xml", 2*1024*1024, sError );
	ASSERT_FALSE ( pSource->Connect ( sError ) );
	ASSERT_STREQ ( sError.cstr(), sRes );
	// clean up, fp will be closed automatically in CSphSource_BaseSV::Disconnect()
	SafeDelete ( pSource );
}
#endif
// sphNormalizePath() on absolute paths: collapses "..", duplicate slashes,
// and never climbs above the filesystem root.
TEST ( Text, sphNormalizeAbsolutePath )
{
	// { input, expected } pairs, checked in order
	const char * dCases[][2] =
	{
		{ "/", "/" },
		{ "/..//bbb", "/bbb" },
		{ "/quite/long/path/../../../etc/passwd", "/etc/passwd" },
		{ "/aaa/bbb/ccc/ddd/../../../../../../../", "/" },
	};
	for ( const auto & dCase : dCases )
		ASSERT_STREQ ( sphNormalizePath ( dCase[0] ).cstr(), dCase[1] );
}
// sphNormalizePath() on relative paths: strips trailing slashes and "." parts,
// resolves ".." within the path, and keeps leading ".." that escape it.
// Also covers empty and nullptr inputs (both normalize to "").
TEST ( Text, sphNormalizeRelativePath )
{
	// { input, expected } pairs, checked in order
	const char * dCases[][2] =
	{
		{ "", "" },
		{ nullptr, "" },
		{ "aaa/", "aaa" },
		{ "aaa/.", "aaa" },
		{ "aaa/././././////././", "aaa" },
		{ "aaa/////", "aaa" },
		{ "aaa/bbb/ccc", "aaa/bbb/ccc" },
		{ "aaa/bbb/ccc/ddd/..", "aaa/bbb/ccc" },
		{ "aaa/bbb/ccc/ddd/../../..", "aaa" },
		{ "aaa/bbb/ccc/ddd/../../../xxx", "aaa/xxx" },
		{ "aaa/bbb/ccc/ddd/../../../..", "" },
		{ "aaa/bbb/ccc/ddd/../../../../", "" },
		{ "aaa/bbb/ccc/ddd/../../../../../../../", "../../.." },
		{ "..//bbb", "../bbb" },
	};
	for ( const auto & dCase : dCases )
		ASSERT_STREQ ( sphNormalizePath ( dCase[0] ).cstr(), dCase[1] );
}
| 36,250
|
C++
|
.cpp
| 687
| 50.253275
| 246
| 0.592816
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| true
| false
| false
|
17,000
|
gtests_wsrep.cpp
|
manticoresoftware_manticoresearch/src/gtests/gtests_wsrep.cpp
|
//
// Copyright (c) 2023-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org
//
#include <gtest/gtest.h>
#include "replication/wsrep_cxx.h"
constexpr Wsrep::UUID_t dZeroUUID { 0 };
// A value-initialized UUID must compare equal to the all-zero UUID constant.
TEST ( Wsrep, default_uuid )
{
	Wsrep::UUID_t tUuid {};
	EXPECT_EQ ( tUuid, dZeroUUID );
}
// Round-trip check: UUID -> canonical dashed lower-case string, and
// string -> UUID (case-insensitive); malformed input must yield the zero UUID.
TEST ( Wsrep, Uuid2Str_Str2Uuid )
{
	using namespace Wsrep;
	// 16 raw bytes spelling 01234567-89ab-cdef-fedc-ba9876543210
	UUID_t y { { 1, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF, 0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10 } };
	UUID_t Empty {};
	// formatting: lower-case hex in canonical dashed form
	auto sUuid = Uuid2Str ( y );
	EXPECT_STREQ ( sUuid.cstr(), "01234567-89ab-cdef-fedc-ba9876543210");
	auto sEmpty = Uuid2Str ( Empty );
	EXPECT_STREQ ( sEmpty.cstr(), "00000000-0000-0000-0000-000000000000" );
	// parsing accepts both lower- and mixed-case hex
	auto x1 = Str2Uuid ( "01234567-89ab-cdef-fedc-ba9876543210" );
	auto x2 = Str2Uuid ( "01234567-89Ab-CdEf-FeDc-Ba9876543210" );
	EXPECT_TRUE ( y == x1 );
	EXPECT_TRUE ( y == x2 );
	// malformed input parses to the zero UUID
	auto wrong1 = Str2Uuid ( "" );
	auto wrong2 = Str2Uuid ( "BlaBla" );
	EXPECT_TRUE ( Empty == wrong1 );
	EXPECT_TRUE ( Empty == wrong2 );
}
// A default-constructed global transaction id carries the zero UUID
// and the invalid (WRONG_SEQNO) sequence number.
TEST ( Wsrep, default_globaltid )
{
	Wsrep::GlobalTid_t tGtid;
	EXPECT_EQ ( tGtid.m_tUuid, dZeroUUID );
	EXPECT_EQ ( tGtid.m_iSeqNo, Wsrep::WRONG_SEQNO );
}
// Round-trip check for global transaction ids ("uuid:seqno"): formatting,
// case-insensitive parsing, and rejection of malformed uuid/seqno parts.
TEST ( Wsrep, Gtid2Str_Str2Gtid )
{
	using namespace Wsrep;
	// seqno 0xEE == 238 decimal; Gtid2Str renders it in decimal
	GlobalTid_t y { { {1, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF, 0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10} }, 0xEE };
	GlobalTid_t Empty;
	auto sUuid = Gtid2Str ( y );
	EXPECT_STREQ ( sUuid.cstr(), "01234567-89ab-cdef-fedc-ba9876543210:238" );
	// default gtid formats as the zero uuid with seqno -1 (WRONG_SEQNO)
	auto sEmpty = Gtid2Str ( Empty );
	EXPECT_STREQ ( sEmpty.cstr(), "00000000-0000-0000-0000-000000000000:-1" );
	// parsing is case-insensitive on the uuid part
	EXPECT_TRUE ( y == Str2Gtid ( "01234567-89ab-cdef-fedc-ba9876543210:238" ) );
	EXPECT_TRUE ( y == Str2Gtid ( "01234567-89Ab-CdEf-FeDc-Ba9876543210:238" ) );
	// malformed inputs must yield the default gtid
	EXPECT_TRUE ( Empty == Str2Gtid ( "" ) );
	EXPECT_TRUE ( Empty == Str2Gtid ( "BlaBla" ) );
	EXPECT_TRUE ( Empty == Str2Gtid ( "01234567-89Ab-CdEf-FeDc-Ba9876543210:aa" ) );
}
| 2,291
|
C++
|
.cpp
| 58
| 37.706897
| 123
| 0.693105
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
17,001
|
html_stripper.cpp
|
manticoresoftware_manticoresearch/src/stripper/html_stripper.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "html_stripper.h"
#include "sphinxutils.h"
#include "sphinxint.h"
#include "tokenizer/tokenizer.h"
/////////////////////////////////////////////////////////////////////////////
// HTML STRIPPER
/////////////////////////////////////////////////////////////////////////////
#if !_WIN32
/// In-place ASCII lowercasing shim for non-Windows builds, standing in for the
/// MSVC _strlwr()/strlwr() routine.
/// @param s NUL-terminated string to lowercase in place
/// @return the original string pointer, matching the Windows counterpart
///         (the previous version returned the terminating-NUL pointer instead)
char* strlwr ( char* s )
{
	char* sRes = s;
	while ( *s )
	{
		// cast through unsigned char: tolower() on a negative plain char is UB
		*s = (char) tolower ( (unsigned char)*s );
		s++;
	}
	return sRes;
}
#endif
// True if c may appear inside a tag name: '.'/':' or anything
// the engine treats as alphanumeric.
static inline int sphIsTag ( int c )
{
	if ( c=='.' || c==':' )
		return 1;
	return sphIsAlpha ( c );
}
// True if c may start a tag name: an ASCII letter, underscore, dot, or colon.
static inline int sphIsTagStart ( int c )
{
	switch ( c )
	{
		case '_':
		case '.':
		case ':':
			return 1;
		default:
			return ( c>='a' && c<='z' ) || ( c>='A' && c<='Z' );
	}
}
/// Construct the stripper; when bDefaultTags is set, pre-populate the tag list
/// with elements commonly treated as inline (their removal must not split words).
CSphHTMLStripper::CSphHTMLStripper ( bool bDefaultTags )
{
	if ( bDefaultTags )
	{
		const char * dInline[] =
		{
			"a", "b", "i", "s", "u",
			"basefont", "big", "em", "font", "img",
			"label", "small", "span", "strike", "strong",
			"sub\0", "sup\0", // fix gcc 3.4.3 on solaris10 compiler bug
			"tt"
		};
		const int iCount = int ( sizeof(dInline)/sizeof(dInline[0]) );
		m_dTags.Resize ( iCount );
		for ( int i=0; i<iCount; i++ )
		{
			m_dTags[i].m_sTag = dInline[i];
			m_dTags[i].m_iTagLen = (int) strlen ( dInline[i] );
			m_dTags[i].m_bInline = true;
		}
	}
	UpdateTags ();
}
/// Map a tag's first character into the lookup-table index space:
/// 'a'..'z' (case-folded) -> 0..25, '_' -> 26, ':' -> 27, anything else -> -1.
int CSphHTMLStripper::GetCharIndex ( int iCh ) const
{
	if ( iCh>='a' && iCh<='z' )
		return iCh-'a';
	if ( iCh>='A' && iCh<='Z' )
		return iCh-'A';
	switch ( iCh )
	{
		case '_':	return 26;
		case ':':	return 27;
		default:	return -1;
	}
}
void CSphHTMLStripper::UpdateTags ()
{
m_dTags.Sort ();
for ( int i=0; i<MAX_CHAR_INDEX; i++ )
{
m_dStart[i] = INT_MAX;
m_dEnd[i] = -1;
}
ARRAY_FOREACH ( i, m_dTags )
{
int iIdx = GetCharIndex ( m_dTags[i].m_sTag.cstr()[0] );
if ( iIdx<0 )
continue;
m_dStart[iIdx] = Min ( m_dStart[iIdx], i );
m_dEnd[iIdx] = Max ( m_dEnd[iIdx], i );
}
}
/// Parse the html_index_attrs config directive.
/// Grammar: tag=attr[,attr...][; tag=attr...]. For every listed tag, mark it
/// for attribute indexing and record the attribute names to index.
/// @return false (with sError set) on any syntax error; true otherwise.
bool CSphHTMLStripper::SetIndexedAttrs ( const char * sConfig, CSphString & sError )
{
	if ( !sConfig || !*sConfig )
		return true;

	char sTag[256], sAttr[256];

	const char * p = sConfig, * s;
#define LOC_ERROR(_msg,_pos) { sError.SetSprintf ( "SetIndexedAttrs(): %s near '%s'", _msg, _pos ); return false; }

	while ( *p )
	{
		// skip spaces
		while ( *p && isspace(*p) ) p++;
		if ( !*p ) break;

		// check tag name
		s = p; while ( sphIsTag(*p) ) p++;
		if ( s==p ) LOC_ERROR ( "invalid character in tag name", s );

		// get tag name (bounded copy into the fixed buffer)
		if ( p-s>=(int)sizeof(sTag) ) LOC_ERROR ( "tag name too long", s );
		strncpy ( sTag, s, p-s );
		sTag[p-s] = '\0';

		// skip spaces
		while ( *p && isspace(*p) ) p++;
		if ( *p++!='=' ) LOC_ERROR ( "'=' expected", p-1 );

		// add indexed tag entry, if not there yet
		// (tag names are matched case-insensitively, hence the lowercasing)
		strlwr ( sTag );
		int iIndexTag = -1;
		ARRAY_FOREACH ( i, m_dTags )
			if ( m_dTags[i].m_sTag==sTag )
		{
			iIndexTag = i;
			break;
		}
		if ( iIndexTag<0 )
		{
			m_dTags.Add();
			m_dTags.Last().m_sTag = sTag;
			m_dTags.Last().m_iTagLen = (int) strlen ( sTag );
			iIndexTag = m_dTags.GetLength()-1;
		}

		m_dTags[iIndexTag].m_bIndexAttrs = true;
		StrVec_t & dAttrs = m_dTags[iIndexTag].m_dAttrs;

		// scan attributes
		while ( *p )
		{
			// skip spaces
			while ( *p && isspace(*p) ) p++;
			if ( !*p ) break;

			// check attr name
			s = p; while ( sphIsTag(*p) ) p++;
			if ( s==p ) LOC_ERROR ( "invalid character in attribute name", s );

			// get attr name (bounded copy, same as the tag name above)
			if ( p-s>=(int)sizeof(sAttr) ) LOC_ERROR ( "attribute name too long", s );
			strncpy ( sAttr, s, p-s );
			sAttr[p-s] = '\0';

			// add attr, if not there yet
			int iAttr;
			for ( iAttr=0; iAttr<dAttrs.GetLength(); iAttr++ )
				if ( dAttrs[iAttr]==sAttr )
					break;

			if ( iAttr==dAttrs.GetLength() )
				dAttrs.Add ( sAttr );

			// skip spaces
			while ( *p && isspace(*p) ) p++;
			if ( !*p ) break;

			// check if there's next attr or tag
			if ( *p==',' ) { p++; continue; } // next attr
			if ( *p==';' ) { p++; break; } // next tag

			LOC_ERROR ( "',' or ';' or end of line expected", p );
		}
	}

#undef LOC_ERROR

	UpdateTags ();
	return true;
}
bool CSphHTMLStripper::SetRemovedElements ( const char * sConfig, CSphString & )
{
if ( !sConfig || !*sConfig )
return true;
const char * p = sConfig;
while ( *p )
{
// skip separators
while ( *p && !sphIsTag(*p) ) p++;
if ( !*p ) break;
// get tag name
const char * s = p;
while ( sphIsTag(*p) ) p++;
CSphString sTag;
sTag.SetBinary ( s, int(p-s) );
sTag.ToLower ();
// mark it
int iTag;
for ( iTag=0; iTag<m_dTags.GetLength(); iTag++ )
if ( m_dTags[iTag].m_sTag==sTag )
{
m_dTags[iTag].m_bRemove = true;
break;
}
if ( iTag==m_dTags.GetLength() )
{
m_dTags.Add();
m_dTags.Last().m_sTag = sTag;
m_dTags.Last().m_iTagLen = (int) strlen ( sTag.cstr() );
m_dTags.Last().m_bRemove = true;
}
}
UpdateTags ();
return true;
}
void CSphHTMLStripper::EnableParagraphs ()
{
// known block-level elements
const char * dBlock[] = { "address", "blockquote", "caption", "center",
"dd", "div", "dl", "dt", "h1", "h2", "h3", "h4", "h5", "li", "menu",
"ol", "p", "pre", "table", "tbody", "td", "tfoot", "th", "thead",
"tr", "ul", NULL };
for ( int iBlock=0; dBlock[iBlock]; iBlock++ )
{
const char * sTag = dBlock[iBlock];
// mark if known already
int iTag;
for ( iTag=0; iTag<m_dTags.GetLength(); iTag++ )
if ( m_dTags[iTag].m_sTag==sTag )
{
m_dTags[iTag].m_bPara = true;
break;
}
// add if not known yet
if ( iTag==m_dTags.GetLength() )
{
html_stripper::StripperTag_t& dTag = m_dTags.Add();
dTag.m_sTag = sTag;
dTag.m_iTagLen = (int) strlen(sTag);
dTag.m_bPara = true;
}
}
UpdateTags ();
}
/// Parse the index_zones config directive.
/// Grammar: {tagname | prefix*} [, ...]. Each entry registers a zone tag;
/// a trailing '*' makes it a prefix match rather than an exact one.
/// @return false (with sError set) on malformed input; true otherwise.
bool CSphHTMLStripper::SetZones ( const char * sZones, CSphString & sError )
{
	// yet another mini parser!
	// index_zones = {tagname | prefix*} [, ...]
	if ( !sZones || !*sZones )
		return true;

	const char * s = sZones;
	while ( *s )
	{
		// skip spaces
		while ( sphIsSpace(*s) )
			s++;
		if ( !*s )
			break;

		// expect ident
		if ( !sphIsTagStart(*s) )
		{
			sError.SetSprintf ( "unexpected char near '%s' in index_zones", s );
			return false;
		}

		// get ident (either tagname or prefix*)
		const char * sTag = s;
		while ( sphIsTag(*s) )
			s++;

		const char * sTagEnd = s;

		bool bPrefix = false;
		if ( *s=='*' )
		{
			s++;
			bPrefix = true;
		}

		// skip spaces
		while ( sphIsSpace(*s) )
			s++;

		// expect eof or comma after ident
		if ( *s && *s!=',' )
		{
			sError.SetSprintf ( "unexpected char near '%s' in index_zones", s );
			return false;
		}
		if ( *s==',' )
			s++;

		// got valid entry, handle it
		html_stripper::StripperTag_t & tTag = m_dTags.Add();
		tTag.m_sTag.SetBinary ( sTag, int(sTagEnd-sTag) );
		tTag.m_iTagLen = (int)( sTagEnd-sTag );
		tTag.m_bZone = true;
		tTag.m_bZonePrefix = bPrefix;
	}

	UpdateTags ();
	return true;
}
/// Skip past a quoted HTML attribute value starting at *p (the opening quote).
/// Returns the position just past the closing quote, or a best-guess recovery
/// point (first '>' or CR seen) when the closing quote is missing.
const BYTE * SkipQuoted ( const BYTE * p )
{
	const BYTE * pMax = p + 512; // cap the scan; longer attribute values are pathological
	const BYTE * pFallback = NULL; // recovery position if the closing quote never shows up
	BYTE cQuote = *p++; // opening delimiter, either apostrophe or double quote

	while ( p<pMax && *p && *p!=cQuote )
	{
		// remember the first '>' or CR as a plausible end of the enclosing tag
		if ( !pFallback && ( *p=='>' || *p=='\r' ) )
			pFallback = p;
		p++;
	}

	if ( *p==cQuote )
		return p+1;
	return pFallback ? pFallback : p;
}
/// A single named HTML entity: its name (without the '&' / ';' delimiters)
/// and the Unicode codepoint it decodes to.
struct HtmlEntity_t
{
	const char * m_sName;	// entity name, e.g. "amp"
	int m_iCode;			// decoded codepoint, e.g. 38
};
/// Perfect-hash function over the supported HTML entity names.
/// NOTE(review): the asso_values table and switch shape match gperf output —
/// this appears machine-generated; keep it in sync with HtmlEntityLookup()
/// and regenerate rather than editing by hand.
static inline DWORD HtmlEntityHash ( const BYTE * str, int len )
{
	static const unsigned short asso_values[] =
	{
		421, 421, 421, 421, 421, 421, 421, 421, 421, 421,
		421, 421, 421, 421, 421, 421, 421, 421, 421, 421,
		421, 421, 421, 421, 421, 421, 421, 421, 421, 421,
		421, 421, 421, 421, 421, 421, 421, 421, 421, 421,
		421, 421, 421, 421, 421, 421, 421, 421, 421, 4,
		6, 22, 1, 421, 421, 421, 421, 421, 421, 421,
		421, 421, 421, 421, 421, 170, 48, 0, 5, 44,
		0, 10, 10, 86, 421, 7, 0, 1, 42, 93,
		41, 421, 0, 5, 8, 14, 421, 421, 5, 11,
		8, 421, 421, 421, 421, 421, 421, 1, 25, 27,
		9, 2, 113, 82, 14, 3, 179, 1, 81, 91,
		12, 0, 1, 180, 56, 17, 5, 31, 60, 7,
		3, 161, 2, 3, 421, 421, 421, 421, 421, 421,
		421, 421, 421, 421, 421, 421, 421, 421, 421, 421,
		421, 421, 421, 421, 421, 421, 421, 421, 421, 421,
		421, 421, 421, 421, 421, 421, 421, 421, 421, 421,
		421, 421, 421, 421, 421, 421, 421, 421, 421, 421,
		421, 421, 421, 421, 421, 421, 421, 421, 421, 421,
		421, 421, 421, 421, 421, 421, 421, 421, 421, 421,
		421, 421, 421, 421, 421, 421, 421, 421, 421, 421,
		421, 421, 421, 421, 421, 421, 421, 421, 421, 421,
		421, 421, 421, 421, 421, 421, 421, 421, 421, 421,
		421, 421, 421, 421, 421, 421, 421, 421, 421, 421,
		421, 421, 421, 421, 421, 421, 421, 421, 421, 421,
		421, 421, 421, 421, 421, 421, 421, 421, 421, 421,
		421, 421, 421, 421, 421, 421, 421
	};

	int hval = len;
	switch ( hval )
	{
		// deliberate fallthroughs: longer names accumulate more characters
		default: hval += asso_values [ str[4] ];
			// fall through
		case 4:
		case 3: hval += asso_values [ str[2] ];
			// fall through
		case 2: hval += asso_values [ str[1]+1 ];
			// fall through
		case 1: hval += asso_values [ str[0] ];
			break;
	}
	return hval + asso_values [ str[len-1] ];
}
static inline int HtmlEntityLookup ( const BYTE * str, int len )
{
static const unsigned char lengthtable[] =
{
0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 3, 3,
4, 3, 3, 5, 3, 6, 5, 5, 3, 4, 4, 5, 3, 4,
4, 0, 5, 4, 5, 6, 5, 6, 4, 5, 3, 3, 5, 0,
0, 0, 0, 6, 0, 5, 5, 0, 5, 6, 6, 3, 0, 3,
5, 3, 0, 6, 0, 4, 3, 6, 3, 6, 6, 6, 6, 5,
5, 5, 5, 5, 5, 2, 6, 4, 0, 6, 3, 3, 3, 0,
4, 5, 4, 4, 4, 3, 7, 4, 3, 6, 2, 3, 6, 4,
3, 6, 5, 6, 5, 5, 4, 2, 0, 0, 4, 6, 8, 0,
0, 0, 5, 5, 0, 6, 6, 2, 2, 4, 4, 6, 6, 4,
4, 5, 6, 2, 3, 4, 6, 5, 0, 2, 0, 0, 6, 6,
6, 6, 6, 4, 6, 5, 0, 6, 4, 5, 4, 6, 6, 0,
0, 4, 6, 5, 6, 0, 6, 4, 5, 6, 5, 6, 4, 0,
3, 6, 0, 4, 4, 4, 5, 4, 6, 0, 4, 4, 6, 5,
6, 7, 2, 2, 6, 2, 5, 2, 5, 0, 0, 0, 4, 4,
2, 4, 2, 2, 4, 0, 4, 4, 4, 5, 5, 0, 3, 7,
5, 0, 5, 6, 5, 0, 6, 0, 6, 0, 4, 6, 4, 6,
6, 2, 6, 0, 5, 5, 4, 6, 6, 0, 5, 6, 4, 4,
4, 4, 0, 5, 0, 5, 0, 4, 5, 4, 0, 4, 4, 4,
0, 0, 0, 4, 0, 0, 0, 5, 6, 5, 3, 0, 0, 6,
5, 4, 5, 5, 5, 5, 0, 5, 5, 0, 5, 0, 0, 0,
4, 6, 0, 3, 0, 5, 5, 0, 0, 3, 6, 5, 0, 4,
0, 0, 0, 0, 5, 7, 5, 3, 5, 3, 0, 0, 6, 0,
6, 0, 0, 7, 0, 0, 5, 0, 5, 0, 0, 0, 0, 5,
4, 0, 0, 0, 0, 0, 7, 4, 0, 0, 3, 0, 0, 0,
3, 0, 6, 0, 0, 7, 5, 5, 0, 3, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 5,
5, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 4, 6, 0, 0, 0, 0, 0, 0, 0,
4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5
};
static const struct HtmlEntity_t wordlist[] =
{
{"", 0}, {"", 0}, {"", 0}, {"", 0}, {"", 0}, {"", 0},
{"Rho", 929},
{"", 0}, {"", 0}, {"", 0}, {"", 0}, {"", 0},
{"Chi", 935},
{"phi", 966},
{"iota", 953},
{"psi", 968},
{"int", 8747},
{"theta", 952},
{"amp", 38},
{"there4", 8756},
{"Theta", 920},
{"omega", 969},
{"and", 8743},
{"prop", 8733},
{"ensp", 8194},
{"image", 8465},
{"not", 172},
{"isin", 8712},
{"sdot", 8901},
{"", 0},
{"prime", 8242},
{"prod", 8719},
{"trade", 8482},
{"Scaron", 352},
{"kappa", 954},
{"thinsp", 8201},
{"emsp", 8195},
{"thorn", 254},
{"eta", 951},
{"chi", 967},
{"Kappa", 922},
{"", 0}, {"", 0}, {"", 0}, {"", 0},
{"scaron", 353},
{"", 0},
{"notin", 8713},
{"ndash", 8211},
{"", 0},
{"acute", 180},
{"otilde", 245},
{"atilde", 227},
{"Phi", 934},
{"", 0},
{"Psi", 936},
{"pound", 163},
{"cap", 8745},
{"", 0},
{"otimes", 8855},
{"", 0},
{"nbsp", 32},
{"rho", 961},
{"ntilde", 241},
{"eth", 240},
{"oacute", 243},
{"aacute", 225},
{"eacute", 233},
{"iacute", 237},
{"nabla", 8711},
{"Prime", 8243},
{"ocirc", 244},
{"acirc", 226},
{"ecirc", 234},
{"icirc", 238},
{"or", 8744},
{"Yacute", 221},
{"nsub", 8836},
{"", 0},
{"Uacute", 218},
{"Eta", 919},
{"ETH", 208},
{"sup", 8835},
{"", 0},
{"supe", 8839},
{"Ucirc", 219},
{"sup1", 185},
{"para", 182},
{"sup2", 178},
{"loz", 9674},
{"omicron", 959},
{"part", 8706},
{"cup", 8746},
{"Ntilde", 209},
{"Mu", 924},
{"tau", 964},
{"uacute", 250},
{"Iota", 921},
{"Tau", 932},
{"rsaquo", 8250},
{"alpha", 945},
{"Ccedil", 199},
{"ucirc", 251},
{"oline", 8254},
{"sup3", 179},
{"nu", 957},
{"", 0}, {"", 0},
{"sube", 8838},
{"Eacute", 201},
{"thetasym", 977},
{"", 0}, {"", 0}, {"", 0},
{"Omega", 937},
{"Ecirc", 202},
{"", 0},
{"lowast", 8727},
{"iquest", 191},
{"lt", 60},
{"gt", 62},
{"ordm", 186},
{"euro", 8364},
{"oslash", 248},
{"lsaquo", 8249},
{"zeta", 950},
{"cong", 8773},
{"mdash", 8212},
{"ccedil", 231},
{"ne", 8800},
{"sub", 8834},
{"Zeta", 918},
{"Lambda", 923},
{"Gamma", 915},
{"", 0},
{"Nu", 925},
{"", 0}, {"", 0},
{"ograve", 242},
{"agrave", 224},
{"egrave", 232},
{"igrave", 236},
{"frac14", 188},
{"ordf", 170},
{"Otilde", 213},
{"infin", 8734},
{"", 0},
{"frac12", 189},
{"beta", 946},
{"radic", 8730},
{"darr", 8595},
{"Iacute", 205},
{"Ugrave", 217},
{"", 0}, {"", 0},
{"harr", 8596},
{"hearts", 9829},
{"Icirc", 206},
{"Oacute", 211},
{"", 0},
{"frac34", 190},
{"cent", 162},
{"crarr", 8629},
{"curren", 164},
{"Ocirc", 212},
{"brvbar", 166},
{"sect", 167},
{"", 0},
{"ang", 8736},
{"ugrave", 249},
{"", 0},
{"Beta", 914},
{"uarr", 8593},
{"dArr", 8659},
{"asymp", 8776},
{"perp", 8869},
{"Dagger", 8225},
{"", 0},
{"hArr", 8660},
{"rang", 9002},
{"dagger", 8224},
{"exist", 8707},
{"Egrave", 200},
{"Omicron", 927},
{"mu", 956},
{"pi", 960},
{"weierp", 8472},
{"xi", 958},
{"clubs", 9827},
{"Xi", 926},
{"aring", 229},
{"", 0}, {"", 0}, {"", 0},
{"copy", 169},
{"uArr", 8657},
{"ni", 8715},
{"rarr", 8594},
{"le", 8804},
{"ge", 8805},
{"zwnj", 8204},
{"", 0},
{"apos", 39},
{"macr", 175},
{"lang", 9001},
{"gamma", 947},
{"Delta", 916},
{"", 0},
{"uml", 168},
{"alefsym", 8501},
{"delta", 948},
{"", 0},
{"bdquo", 8222},
{"lambda", 955},
{"equiv", 8801},
{"", 0},
{"Oslash", 216},
{"", 0},
{"hellip", 8230},
{"", 0},
{"rArr", 8658},
{"Atilde", 195},
{"larr", 8592},
{"spades", 9824},
{"Igrave", 204},
{"Pi", 928},
{"yacute", 253},
{"", 0},
{"diams", 9830},
{"sbquo", 8218},
{"fnof", 402},
{"Ograve", 210},
{"plusmn", 177},
{"", 0},
{"rceil", 8969},
{"Aacute", 193},
{"ouml", 246},
{"auml", 228},
{"euml", 235},
{"iuml", 239},
{"", 0},
{"Acirc", 194},
{"", 0},
{"rdquo", 8221},
{"", 0},
{"lArr", 8656},
{"rsquo", 8217},
{"Yuml", 376},
{"", 0},
{"quot", 34},
{"Uuml", 220},
{"bull", 8226},
{"", 0}, {"", 0}, {"", 0},
{"real", 8476},
{"", 0}, {"", 0}, {"", 0},
{"lceil", 8968},
{"permil", 8240},
{"upsih", 978},
{"sum", 8721},
{"", 0}, {"", 0},
{"divide", 247},
{"raquo", 187},
{"uuml", 252},
{"ldquo", 8220},
{"Alpha", 913},
{"szlig", 223},
{"lsquo", 8216},
{"", 0},
{"Sigma", 931},
{"tilde", 732},
{"", 0},
{"THORN", 222},
{"", 0}, {"", 0}, {"", 0},
{"Euml", 203},
{"rfloor", 8971},
{"", 0},
{"lrm", 8206},
{"", 0},
{"sigma", 963},
{"iexcl", 161},
{"", 0}, {"", 0},
{"deg", 176},
{"middot", 183},
{"laquo", 171},
{"", 0},
{"circ", 710},
{"", 0}, {"", 0}, {"", 0}, {"", 0},
{"frasl", 8260},
{"epsilon", 949},
{"oplus", 8853},
{"yen", 165},
{"micro", 181},
{"piv", 982},
{"", 0}, {"", 0},
{"lfloor", 8970},
{"", 0},
{"Agrave", 192},
{"", 0}, {"", 0},
{"Upsilon", 933},
{"", 0}, {"", 0},
{"times", 215},
{"", 0},
{"cedil", 184},
{"", 0}, {"", 0}, {"", 0}, {"", 0},
{"minus", 8722},
{"Iuml", 207},
{"", 0}, {"", 0}, {"", 0}, {"", 0}, {"", 0},
{"upsilon", 965},
{"Ouml", 214},
{"", 0}, {"", 0},
{"rlm", 8207},
{"", 0}, {"", 0}, {"", 0},
{"reg", 174},
{"", 0},
{"forall", 8704},
{"", 0}, {"", 0},
{"Epsilon", 917},
{"empty", 8709},
{"OElig", 338},
{"", 0},
{"shy", 173},
{"", 0}, {"", 0}, {"", 0}, {"", 0}, {"", 0}, {"", 0}, {"", 0}, {"", 0}, {"", 0},
{"", 0}, {"", 0}, {"", 0}, {"", 0},
{"Aring", 197},
{"", 0}, {"", 0}, {"", 0},
{"oelig", 339},
{"aelig", 230},
{"", 0},
{"zwj", 8205},
{"", 0}, {"", 0}, {"", 0}, {"", 0}, {"", 0}, {"", 0}, {"", 0}, {"", 0}, {"", 0},
{"", 0}, {"", 0}, {"", 0}, {"", 0}, {"", 0},
{"sim", 8764},
{"", 0}, {"", 0}, {"", 0}, {"", 0}, {"", 0}, {"", 0}, {"", 0}, {"", 0}, {"", 0},
{"", 0}, {"", 0}, {"", 0}, {"", 0}, {"", 0}, {"", 0},
{"yuml", 255},
{"sigmaf", 962},
{"", 0}, {"", 0}, {"", 0}, {"", 0}, {"", 0}, {"", 0}, {"", 0},
{"Auml", 196},
{"", 0}, {"", 0}, {"", 0}, {"", 0}, {"", 0}, {"", 0}, {"", 0}, {"", 0}, {"", 0},
{"", 0}, {"", 0}, {"", 0}, {"", 0},
{"AElig", 198}
};
const int MIN_WORD_LENGTH = 2;
const int MAX_WORD_LENGTH = 8;
const int MAX_HASH_VALUE = 420;
if ( len<=MAX_WORD_LENGTH && len>=MIN_WORD_LENGTH )
{
int key = HtmlEntityHash ( str, len );
if ( key<=MAX_HASH_VALUE && key>=0 )
if ( len==lengthtable[key] )
{
const char * s = wordlist[key].m_sName;
if ( *str==*s && !memcmp ( str+1, s+1, len-1 ) )
return wordlist[key].m_iCode;
}
}
return 0;
}
// Skip an XML processing instruction ("<?...") and return the position just
// past it. Scans at most 256 bytes; if no terminator is found in that window,
// falls back to skipping the non-whitespace run right after "<?".
static const BYTE * SkipPI ( const BYTE * s )
{
	assert ( s[0]=='<' && s[1]=='?' );
	s += 2;

	const BYTE * pBegin = s;
	const BYTE * pBound = s + 256;

	// look for a closing '>' inside the probe window; bailing on any ">" is
	// what Firefox does, and it also covers the well-formed "?>" terminator
	while ( s<pBound && *s )
	{
		if ( *s=='>' )
			return s+1;
		++s;
	}

	if ( !*s )
		return s;

	// no end marker found in the window; just consume the token after "<?"
	s = pBegin;
	while ( s<pBound && *s && !sphIsSpace(*s) )
		++s;
	return s;
}
// Strip HTML markup from a zero-terminated buffer, in place.
// Two passes over sData:
//  1) markup pass — decodes character entities, removes or replaces tags,
//     copies indexed attribute values through, and emits MAGIC_CODE_ZONE /
//     MAGIC_CODE_PARAGRAPH markers for zone and paragraph boundaries;
//  2) whitespace pass — collapses runs of spaces and duplicate paragraph
//     markers, and copies zone markers through verbatim.
// The output is never longer than the input, so writing through d while
// reading through s over the same buffer is safe (d always trails s).
void CSphHTMLStripper::Strip ( BYTE * sData ) const
{
	if ( !sData )
		return;
	const BYTE * s = sData; // read cursor
	BYTE * d = sData; // write cursor; never passes s
	while (true)
	{
		/////////////////////////////////////
		// scan until eof, or tag, or entity
		/////////////////////////////////////
		// plain text: copy through, mapping control chars (<0x20) to spaces
		while ( *s && *s!='<' && *s!='&' )
		{
			if ( *s>=0x20 )
				*d++ = *s;
			else
				*d++ = ' ';
			s++;
		}
		if ( !*s )
			break;
		/////////////////
		// handle entity
		/////////////////
		if ( *s=='&' )
		{
			if ( s[1]=='#' )
			{
				// handle "&#number;" and "&#xnumber;" forms
				DWORD uCode = 0;
				s += 2;
				bool bHex = ( *s && ( *s=='x' || *s=='X') );
				if ( !bHex )
				{
					while ( isdigit(*s) )
						uCode = uCode*10 + (*s++) - '0';
				} else
				{
					s++;
					while ( *s )
					{
						if ( isdigit(*s) )
							uCode = uCode*16 + (*s++) - '0';
						else if ( *s>=0x41 && *s<=0x46 )
							uCode = uCode*16 + (*s++) - 'A' + 0xA;
						else if ( *s>=0x61 && *s<=0x66 )
							uCode = uCode*16 + (*s++) - 'a' + 0xA;
						else
							break;
					}
				}
				uCode = uCode % 0x110000; // there is no unicode code-points bigger than this value
				// malformed (unterminated or control-range) references are dropped entirely
				if ( uCode<=0x1f || *s!=';' ) // 0-31 are reserved codes
					continue;
				d += sphUTF8Encode ( d, (int)uCode );
				s++;
			} else
			{
				// named entity ("&amp;" etc); skip until ';' or max length
				if ( ( s[1]>='a' && s[1]<='z' ) || ( s[1]>='A' && s[1]<='Z' ) )
				{
					const int MAX_ENTITY_LEN = 8;
					const BYTE * sStart = s+1;
					while ( *s && *s!=';' && s-sStart<=MAX_ENTITY_LEN )
						s++;
					if ( *s==';' )
					{
						int iCode = HtmlEntityLookup ( sStart, (int)(s-sStart) );
						if ( iCode>0 )
						{
							// this is a known entity; encode it
							d += sphUTF8Encode ( d, iCode );
							s++;
							continue;
						}
					}
					// unknown name: rollback to just after '&'
					s = sStart-1;
				}
				// if we're here, it's not an entity; pass the leading ampersand and rescan
				*d++ = *s++;
			}
			continue;
		}
		//////////////
		// handle tag
		//////////////
		assert ( *s=='<' );
		// '<' not followed by a tag-start char: comment, doctype, PI, or junk
		if ( GetCharIndex(s[1])<0 )
		{
			if ( s[1]=='/' )
			{
				// check if it's valid closing tag
				if ( GetCharIndex(s[2])<0 )
				{
					*d++ = *s++;
					continue;
				}
			} else if ( s[1]=='!' )
			{
				if ( s[2]=='-' && s[3]=='-' )
				{
					// it's valid comment; scan until comment end
					s += 4; // skip opening '<!--'
					while ( *s )
					{
						if ( s[0]=='-' && s[1]=='-' && s[2]=='>' )
							break;
						s++;
					}
					if ( !*s )
						break;
					s += 3; // skip closing '-->'
					continue;
				} else if ( isalpha(s[2]) )
				{
					// it's <!doctype> style PI; scan until PI end,
					// skipping over quoted strings inside it
					s += 2;
					while ( *s && *s!='>' )
					{
						if ( *s=='\'' || *s=='"' )
						{
							s = SkipQuoted ( s );
							while ( isspace(*s) ) s++;
						} else
						{
							s++;
						}
					}
					if ( *s=='>' )
						s++;
					continue;
				} else
				{
					// it's something malformed; just ignore
					*d++ = *s++;
					continue;
				}
			} else if ( s[1]=='?' )
			{
				// scan until PI end
				s = SkipPI ( s );
				continue;
			} else
			{
				// simply malformed
				*d++ = *s++;
				continue;
			}
		}
		s++; // skip '<'
		//////////////////////////////////////
		// lookup this tag in known tags list
		//////////////////////////////////////
		const html_stripper::StripperTag_t * pTag = nullptr;
		int iZoneNameLen = 0;
		const BYTE * sZoneName = nullptr;
		s = FindTag ( s, &pTag, &sZoneName, &iZoneNameLen );
		/////////////////////////////////////
		// process tag contents
		// index attributes if needed
		// gracefully handle malformed stuff
		/////////////////////////////////////
// skip blanks; the break exits the enclosing attribute loop on eof or '>'
#define LOC_SKIP_SPACES() { while ( sphIsSpace(*s) ) s++; if ( !*s || *s=='>' ) break; }
		bool bIndexAttrs = ( pTag && pTag->m_bIndexAttrs );
		while ( *s && *s!='>' )
		{
			LOC_SKIP_SPACES();
			if ( sphIsTagStart(*s) )
			{
				// skip attribute name while it's valid
				const BYTE * sAttr = s;
				while ( sphIsTag(*s) )
					s++;
				// blanks or a value after a valid attribute name?
				if ( sphIsSpace(*s) || *s=='=' )
				{
					const int iAttrLen = (int)( s - sAttr );
					LOC_SKIP_SPACES();
					// a valid name but w/o a value; keep scanning
					if ( *s!='=' )
						continue;
					// got value!
					s++;
					LOC_SKIP_SPACES();
					// check attribute name against the tag's index_attrs list
					// OPTIMIZE! remove linear search
					int iAttr = -1;
					if ( bIndexAttrs )
					{
						for ( iAttr=0; iAttr<pTag->m_dAttrs.GetLength(); iAttr++ )
						{
							auto iLen = (int) strlen ( pTag->m_dAttrs[iAttr].cstr() );
							if ( iLen==iAttrLen && !strncasecmp ( pTag->m_dAttrs[iAttr].cstr(), (const char*)sAttr, iLen ) )
								break;
						}
						if ( iAttr==pTag->m_dAttrs.GetLength() )
							iAttr = -1;
					}
					// process the value
					const BYTE * sVal = s;
					if ( *s=='\'' || *s=='"' )
					{
						// skip quoted value until a matching quote
						s = SkipQuoted ( s );
					} else
					{
						// skip unquoted value until tag end or whitespace
						while ( *s && *s!='>' && !sphIsSpace(*s) )
							s++;
					}
					// if this one is to be indexed, copy it (sans quotes) to output
					if ( iAttr>=0 )
					{
						const BYTE * sMax = s;
						if ( *sVal=='\'' || *sVal=='"' )
						{
							if ( sMax[-1]==sVal[0] )
								sMax--;
							sVal++;
						}
						while ( sVal<sMax )
							*d++ = *sVal++;
						*d++ = ' ';
					}
					// handled the value; keep scanning
					continue;
				}
				// nope, got an invalid character in the sequence (or maybe eof)
				// fall through to an invalid name handler
			}
			// keep skipping until tag end or whitespace
			while ( *s && *s!='>' && !sphIsSpace(*s) )
				s++;
		}
#undef LOC_SKIP_SPACES
		// skip closing angle bracket, if any
		if ( *s )
			s++;
		// unknown tag is done; others might require a bit more work
		if ( !pTag )
		{
			*d++ = ' '; // unknown tags are *not* inline by default
			continue;
		}
		// handle zones
		if ( pTag->m_bZone )
		{
			// should be at tag's end
			assert ( s[0]=='\0' || s[-1]=='>' );
			// emit secret codes
			*d++ = MAGIC_CODE_ZONE;
			for ( int i=0; i<iZoneNameLen; i++ )
				*d++ = (BYTE) tolower ( sZoneName[i] );
			// NOTE(review): this tests the not-yet-written output byte (stale
			// input data, since d trails s) rather than *s — looks suspicious;
			// original behavior preserved, confirm intent upstream
			if ( *d )
				*d++ = MAGIC_CODE_ZONE;
			if ( !*s )
				break;
			continue;
		}
		// handle paragraph boundaries
		if ( pTag->m_bPara )
		{
			*d++ = MAGIC_CODE_PARAGRAPH;
			continue;
		}
		// in all cases, the tag must be fully processed at this point
		// not a remove-tag? we're done
		if ( !pTag->m_bRemove )
		{
			if ( !pTag->m_bInline )
				*d++ = ' ';
			continue;
		}
		// sudden eof? bail out
		if ( !*s )
			break;
		// must be a proper remove-tag end, then
		assert ( pTag->m_bRemove && s[-1]=='>' );
		// short-form? we're done
		if ( s[-2]=='/' )
			continue;
		// remove-tag: drop everything until the matching closing tag
		// FIXME! should we handle insane cases with quoted closing tag within tag?
		while (true)
		{
			while ( *s && ( s[0]!='<' || s[1]!='/' ) ) s++;
			if ( !*s ) break;
			s += 2; // skip </
			if ( strncasecmp ( pTag->m_sTag.cstr(), (const char*)s, pTag->m_iTagLen )!=0 ) continue;
			if ( !sphIsTag ( s[pTag->m_iTagLen] ) )
			{
				s += pTag->m_iTagLen; // skip tag
				if ( *s=='>' ) s++;
				break;
			}
		}
		if ( !pTag->m_bInline ) *d++ = ' ';
	}
	*d++ = '\0';
	// space, paragraph sequences elimination pass
	s = sData;
	d = sData;
	bool bSpaceOut = false; // last emitted char was (or counts as) a space
	bool bParaOut = false; // last emitted char was a paragraph marker
	bool bZoneOut = false; // last emitted token was a zone marker
	while ( const char c = *s++ )
	{
		assert ( d<=s-1 );
		// handle different character classes
		if ( sphIsSpace(c) )
		{
			// handle whitespace, skip dupes
			if ( !bSpaceOut )
				*d++ = ' ';
			bSpaceOut = true;
			continue;
		} else if ( c==MAGIC_CODE_PARAGRAPH )
		{
			// handle paragraph marker, skip dupes
			if ( !bParaOut && !bZoneOut )
			{
				*d++ = c;
				bParaOut = true;
			}
			bSpaceOut = true;
			continue;
		} else if ( c==MAGIC_CODE_ZONE )
		{
			// zone marker
			// rewind preceding paragraph, if any, it is redundant
			if ( bParaOut )
			{
				assert ( d>sData && d[-1]==MAGIC_CODE_PARAGRAPH );
				d--;
			}
			// copy \4zoneid\4
			*d++ = c;
			while ( *s && *s!=MAGIC_CODE_ZONE )
				*d++ = *s++;
			if ( *s )
				*d++ = *s++;
			// update state
			// no spaces paragraphs allowed
			bSpaceOut = bZoneOut = true;
			bParaOut = false;
			continue;
		} else
		{
			*d++ = c;
			bSpaceOut = bParaOut = bZoneOut = false;
		}
	}
	*d++ = '\0';
}
// Look up the tag at sSrc in the sorted known-tags list.
// On a match, *ppTag points at the tag descriptor and the returned cursor is
// advanced past the tag name; for zone tags, *ppZoneName/*pZoneNameLen
// describe the zone name span (including a leading '/' for closing tags).
const BYTE * CSphHTMLStripper::FindTag ( const BYTE * sSrc, const html_stripper::StripperTag_t ** ppTag,
	const BYTE ** ppZoneName, int * pZoneNameLen ) const
{
	assert ( sSrc && ppTag && ppZoneName && pZoneNameLen );
	assert ( sSrc[0]!='/' || sSrc[1]!='\0' );

	// actual tag name starts right after a leading '/', if any
	const BYTE * szName = ( sSrc[0]=='/' ) ? sSrc+1 : sSrc;
	*ppZoneName = sSrc;
	*pZoneNameLen = 0;

	// m_dStart/m_dEnd bound the candidates sharing the first character
	const int iBucket = GetCharIndex ( szName[0] );
	assert ( iBucket>=0 && iBucket<MAX_CHAR_INDEX );
	if ( iBucket<0 || m_dEnd[iBucket]<0 )
		return sSrc;

	for ( int iTag=m_dStart[iBucket]; iTag<=m_dEnd[iBucket]; ++iTag )
	{
		const html_stripper::StripperTag_t & tCandidate = m_dTags[iTag];
		const int iLen = tCandidate.m_iTagLen;
		const int iCmp = strncasecmp ( tCandidate.m_sTag.cstr(), (const char*)szName, iLen );

		// the list is sorted: once a candidate compares greater, stop
		if ( iCmp>0 )
			break;
		if ( iCmp!=0 )
			continue;

		// exact match: the name must end right where the candidate's does
		if ( !sphIsTag ( szName[iLen] ) )
		{
			*ppTag = &tCandidate;
			sSrc = szName + iLen; // step over the tag name
			if ( tCandidate.m_bZone )
				*pZoneNameLen = int ( sSrc - *ppZoneName );
			break;
		}

		// wildcard (zone-prefix) match: consume the rest of the name chars too
		if ( tCandidate.m_bZonePrefix )
		{
			*ppTag = &tCandidate;
			sSrc = szName + iLen;
			while ( sphIsTag(*sSrc) )
				sSrc++;
			*pZoneNameLen = int ( sSrc - *ppZoneName );
			break;
		}
	}
	return sSrc;
}
// A character may open a tag only if it maps into the known-tags char index.
bool CSphHTMLStripper::IsValidTagStart ( int iCh ) const
{
	const int iSlot = GetCharIndex ( iCh );
	return iSlot>=0 && iSlot<MAX_CHAR_INDEX;
}
| 27,993
|
C++
|
.cpp
| 1,146
| 20.86911
| 116
| 0.495355
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
17,002
|
nodes.cpp
|
manticoresoftware_manticoresearch/src/replication/nodes.cpp
|
//
// Copyright (c) 2019-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org
//
#include "nodes.h"
#include "configuration.h"
#include "searchdaemon.h"
#include "coroutine.h"
#include "api_command_cluster.h"
// Split a node list string on ';', ',' and whitespace into individual
// address tokens. Empty tokens are dropped.
StrVec_t ParseNodesFromString ( CSphString sNodes )
{
	StrVec_t dParsed;
	if ( sNodes.IsEmpty() )
		return dParsed;

	// sNodes arrives by value, so mutating its buffer in place is fine
	auto * szPos = const_cast<char *> ( sNodes.cstr() );
	while ( *szPos )
	{
		// swallow any run of delimiters
		while ( *szPos && memchr ( ";, \t\n\r", *szPos, 6 )!=nullptr )
			++szPos;

		const char * szToken = szPos;
		while ( *szPos && memchr ( ";, \t\n\r", *szPos, 6 )==nullptr )
			++szPos;

		// zero-terminate the token for downstream ParseListener and step over
		// the delimiter itself
		if ( *szPos )
		{
			*szPos = '\0';
			++szPos;
		}

		if ( *szToken )
			dParsed.Add ( szToken );
	}
	return dParsed;
}
// Return the listener's address as text: the stored string when present,
// otherwise the numeric IP rendered into dotted-quad form.
CSphString GetStringAddr ( const ListenerDesc_t & tListen )
{
	if ( !tListen.m_sAddr.IsEmpty() )
		return tListen.m_sAddr;

	std::array<char, SPH_ADDRESS_SIZE> dBuf {};
	sphFormatIP ( dBuf.data(), dBuf.size(), tListen.m_uIP );
	return dBuf.data();
}
// Retry/backoff policy selected by resolve speed.
// Primary template (used for Resolve_e::QUICK): three rounds with a flat 1x
// multiplier of the any-node replication timeout.
template<Resolve_e> struct Wait_T
{
	// per-round multipliers applied to m_iTimeoutMs
	inline static constexpr int m_dMultipliers[] { 1, 1, 1 };
	// base wait between retries, in milliseconds
	inline static int m_iTimeoutMs = ReplicationTimeoutAnyNode();
	// number of retry rounds
	inline static constexpr int m_iMultipliers = sizeof ( m_dMultipliers ) / sizeof ( int );
};
// SLOW flavour: many rounds with progressively longer waits (up to 100x the
// file-retry delay), used where remote nodes may take long to come up.
template<>
struct Wait_T<Resolve_e::SLOW>
{
	inline static constexpr int m_dMultipliers[] { 1, 2, 2, 3, 3, 4, 4, 6, 6, 10, 10, 20, 30, 40, 50, 60, 70, 100, 100, 100 };
	inline static int m_iTimeoutMs = ReplicationFileRetryDelay();
	inline static constexpr int m_iMultipliers = sizeof ( m_dMultipliers ) / sizeof ( int );
};
// Retry harness around a node-processing functor PROC.
// PROC supplies SetNode()/ResetNodes()/IsValid(); Wait_T<EWAIT> supplies the
// retry count and backoff schedule. Each round feeds every node string to
// PROC::SetNode; on any failure the state is reset and the round is retried
// after a sleep (e.g. so DNS has a chance to resolve).
template<typename PROC, Resolve_e EWAIT>
class ISphDescIterator_T : public PROC, public Wait_T<EWAIT>
{
public:
	// forward all arguments to the PROC base
	template<typename... V>
	explicit ISphDescIterator_T ( V&&... tVargs ) : PROC { std::forward<V> ( tVargs )... }
	{}
	// Feed every node to PROC, retrying per the backoff schedule.
	// Returns true when PROC ends up valid; otherwise sets a TLS error that
	// includes the retry count and total elapsed time.
	bool ProcessNodes ( const VecTraits_T<CSphString>& dNodes )
	{
		if ( dNodes.IsEmpty() )
			return TlsMsg::Err ( "empty nodes list" );
		int64_t tmStart = sphMicroTimer();
		int iRetry = 0;
		for ( int iMultiplier : Wait_T<EWAIT>::m_dMultipliers )
		{
			// a fully successful round ends the retry loop
			if ( dNodes.all_of ( [this] ( const auto& sNode ) { return PROC::SetNode ( sNode.cstr() ); } ) )
				break;
			++iRetry;
			sphLogDebugRpl ( "retry %d, wait %.3f sec; error: %s", iRetry, ( Wait_T<EWAIT>::m_iTimeoutMs * iMultiplier ) / 1000.0f, TlsMsg::szError() );
			if ( sphInterrupted() )
				return false;
			// discard partial results and the stale error before the next round
			PROC::ResetNodes();
			TlsMsg::ResetErr();
			// should wait and retry for DNS set
			Threads::Coro::SleepMsec ( Wait_T<EWAIT>::m_iTimeoutMs * iMultiplier );
		}
		return PROC::IsValid() || TlsMsg::Err ( "%s; in %d retries within %.3f sec", TlsMsg::szError(), iRetry, ( sphMicroTimer() - tmStart ) / 1000000.0f );
	}
	// Convenience overload: parse a delimited node-list string first.
	bool ProcessNodes ( const char* szNodes )
	{
		if ( !szNodes || !*szNodes )
			return TlsMsg::Err ("empty nodes list");
		return ProcessNodes ( ParseNodesFromString ( szNodes ) );
	}
};
// get nodes functor to collect listener API with external address
class AgentDescIterator_c
{
VecAgentDesc_t& m_dNodes;
bool m_bValid = true;
public:
explicit AgentDescIterator_c ( VecAgentDesc_t& dNodes )
: m_dNodes { dNodes }
{}
[[nodiscard]] bool IsValid() const noexcept { return m_bValid; }
void ResetNodes()
{
m_dNodes.Reset();
m_bValid = true;
}
bool SetNode ( const CSphString& sNode )
{
// filter out own address to do not query itself
if ( MyIncomingApiAddrBeginsWith ( sNode.cstr() ) )
return true;
TLS_MSG_STRING ( sError );
ListenerDesc_t tListen = ParseListener ( sNode.cstr(), &sError );
if ( tListen.m_eProto==Proto_e::UNKNOWN )
{
m_bValid = false;
return false;
}
if ( tListen.m_eProto!=Proto_e::SPHINX )
return true;
if ( tListen.m_uIP==0 )
return true;
AgentDesc_t& tDesc = m_dNodes.Add();
tDesc.m_sAddr = GetStringAddr ( tListen );
tDesc.m_uAddr = tListen.m_uIP;
tDesc.m_iPort = tListen.m_iPort;
tDesc.m_bNeedResolve = false;
tDesc.m_bPersistent = false;
tDesc.m_iFamily = AF_INET;
return true;
}
};
// Run the node-iteration machinery with the agent-collecting functor at the
// compile-time-selected retry speed; results land in dApiNodes.
template<Resolve_e eSpeed>
inline static void ProcessNodes ( const VecTraits_T<CSphString>& dNodes, VecAgentDesc_t& dApiNodes )
{
	ISphDescIterator_T<AgentDescIterator_c, eSpeed> tIt { dApiNodes };
	tIt.ProcessNodes ( dNodes );
}
// Collect agent descriptors for the SphinxAPI listeners among dNodes,
// using the retry schedule selected by eSpeed. Parse failures are logged
// at debug level; the (possibly partial) result is returned regardless.
VecAgentDesc_t GetDescAPINodes ( const VecTraits_T<CSphString> & dNodes, Resolve_e eSpeed )
{
	TlsMsg::ResetErr();
	VecAgentDesc_t dResult;
	switch ( eSpeed )
	{
	case Resolve_e::QUICK:
		ProcessNodes<Resolve_e::QUICK> ( dNodes, dResult );
		break;
	default:
		ProcessNodes<Resolve_e::SLOW> ( dNodes, dResult );
		break;
	}
	if ( TlsMsg::HasErr() )
		sphLogDebugRpl ( "node parse error: %s", TlsMsg::szError() );
	return dResult;
}
// Node functor that collects "addr:port" strings for listeners speaking one
// specific protocol. Deduplication happens naturally via the string set.
class ListenerProtocolIterator_c
{
	Proto_e m_eProto; // protocol to keep
	bool m_bResolve; // whether to DNS-resolve addresses while parsing
	sph::StringSet m_hNodes; // unique "addr:port" strings collected so far
public:
	explicit ListenerProtocolIterator_c ( Proto_e eProto, bool bResolve )
		: m_eProto ( eProto )
		, m_bResolve ( bResolve )
	{}
	// render all collected nodes as a single comma-separated string
	[[nodiscard]] CSphString DumpNodes() const noexcept
	{
		StringBuilder_c sNodes ( "," );
		for_each ( m_hNodes, [&sNodes] ( const auto& tNode ) { sNodes += tNode.first.cstr(); } );
		return sNodes.cstr();
	}
	// return collected nodes as a vector; set membership guarantees uniqueness
	// (checked via Uniq() in debug builds)
	[[nodiscard]] StrVec_t GetNodes() const noexcept
	{
		StrVec_t dNodes;
		for_each ( m_hNodes, [&dNodes] ( const auto& tNode ) { dNodes.Add ( tNode.first ); } );
#ifndef NDEBUG
		auto iLen = dNodes.GetLength();
		dNodes.Uniq();
		assert ( iLen == dNodes.GetLength() );
#endif
		return dNodes;
	}
	// valid once at least one matching listener has been collected
	[[nodiscard]] bool IsValid() const noexcept
	{
		return !m_hNodes.IsEmpty();
	}
	void ResetNodes()
	{
		m_hNodes.Reset();
	}
	bool SetNode ( const CSphString& sNode )
	{
		TLS_MSG_STRING( sError );
		ListenerDesc_t tListen = ParseResolveListener ( sNode.cstr(), m_bResolve, &sError );
		// unparsable node: report failure to the retry harness
		if ( tListen.m_eProto == Proto_e::UNKNOWN )
			return false;
		// filter out wrong protocol (accepted, just not collected)
		if ( tListen.m_eProto != m_eProto )
			return true;
		auto sAddr = SphSprintf( "%s:%d", GetStringAddr ( tListen ).cstr(), tListen.m_iPort );
		m_hNodes.Add ( sAddr );
		return true;
	}
};
// Keep only the listeners from dSrcNodes that speak eProto, returned as
// "addr:port" strings; uses the quick retry schedule.
StrVec_t FilterNodesByProto ( const VecTraits_T<CSphString> & dSrcNodes, Proto_e eProto, bool bResolve )
{
	TlsMsg::ResetErr();
	ISphDescIterator_T<ListenerProtocolIterator_c, Resolve_e::QUICK> tIterator ( eProto, bResolve );
	tIterator.ProcessNodes ( dSrcNodes );
	return tIterator.GetNodes();
}
| 6,829
|
C++
|
.cpp
| 213
| 29.57277
| 151
| 0.697625
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
17,003
|
recv_state.cpp
|
manticoresoftware_manticoresearch/src/replication/recv_state.cpp
|
//
// Copyright (c) 2023-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org
//
#include "recv_state.h"
#include "std/fnv64.h"
#include "std/orderedhash.h"
#include "coroutine.h"
#include "send_files.h"
// Combine two strings into one 64-bit key by chaining FNV64 passes:
// the hash of the second string seeds the hash of the first.
uint64_t DoubleStringKey ( const CSphString & sFirst, const CSphString & sSecond )
{
	const uint64_t uSeed = sphFNV64 ( sSecond.cstr(), sSecond.Length() );
	return sphFNV64 ( sFirst.cstr(), sFirst.Length(), uSeed );
}
// Apply a batch of file operations (copy-from-reference, copy-from-buffer,
// verify) against file iFile under the state lock. VERIFY short-circuits the
// batch and returns its own result.
WriteResult_e RecvState_c::Write ( int iFile, const CSphVector<FileOp_t> & dOps, const VecTraits_T<BYTE> & dBuf )
{
	Threads::ScopedCoroMutex_t tLock ( m_tLock );
	assert ( m_pMerge );

	if ( !SetFile ( iFile, false ) )
		return WriteResult_e::WRITE_FAILED;

	for ( int iOp=0; iOp<dOps.GetLength(); ++iOp )
	{
		const FileOp_t & tOp = dOps[iOp];
		switch ( tOp.m_eOp )
		{
		case FileOp_e::COPY_FILE:
			if ( !CmdCopyFile ( tOp.m_iOffFile, tOp.m_iSize ) )
				return WriteResult_e::WRITE_FAILED;
			break;
		case FileOp_e::COPY_BUFFER:
			if ( !CmdCopyBuffer ( tOp.m_iOffFile, dBuf, tOp.m_iOffBuf, tOp.m_iSize ) )
				return WriteResult_e::WRITE_FAILED;
			break;
		case FileOp_e::VERIFY_FILE:
			// verification ends the batch immediately
			return CmdVerifyFile ( iFile );
		default:
			TlsMsg::Err ( "unknown file %d operation %d", iFile, (int)tOp.m_eOp );
			return WriteResult_e::WRITE_FAILED;
		}
	}
	return WriteResult_e::OK;
}
// Hand the accumulated merge state over to the caller.
// Ownership is released; further writes require a fresh SetMerge().
MergeState_t* RecvState_c::Flush ()
{
	Threads::ScopedCoroMutex_t tLock ( m_tLock );
	assert ( m_pMerge );
	return m_pMerge.release();
}
// Initialize a fresh merge state from the donor's sync description (tSync),
// the negotiated per-node result (tRes), the target index path, and the
// local reference file list. Replaces any previous state under the lock.
void RecvState_c::SetMerge ( const SyncSrc_t& tSync, const SyncDst_t& tRes, const CSphString& sIndexPath, const VecTraits_T<CSphString>& dFilesRef )
{
	// remote paths and local reference files must line up one-to-one
	assert ( dFilesRef.GetLength() == tRes.m_dRemotePaths.GetLength() );
	Threads::ScopedCoroMutex_t tLock ( m_tLock );
	m_pMerge = std::make_unique<MergeState_t>();
	m_pMerge->m_sIndexPath = sIndexPath;
	m_pMerge->m_dMergeMask = tRes.m_dNodeChunksMask;
	m_pMerge->m_dFilesNew.CopyFrom ( tRes.m_dRemotePaths );
	m_pMerge->m_dFilesRef.CopyFrom ( dFilesRef );
	m_pMerge->m_dChunks.CopyFrom ( tSync.m_dChunks );
	m_pMerge->m_dHashes.CopyFrom ( tSync.m_dHashes );
}
// Switch the active destination file to index iFile.
// bRestart=true reopens the current (or another) file in read-write mode to
// continue at an offset, keeping already-written data; bRestart=false opens
// a brand-new file and refuses to switch while a writer is still active.
// Also opens a reader over the local reference file when one exists.
bool RecvState_c::SetFile ( int iFile, bool bRestart )
{
	// fast path: already positioned on the requested file
	if ( !bRestart && m_iFile == iFile )
		return true;
	if ( !m_pMerge )
		return TlsMsg::Err ( "missed write state" );
	if ( iFile >= m_pMerge->m_dFilesNew.GetLength() )
		return TlsMsg::Err ( "switching disk file %d outside of the bounds %d", iFile, m_pMerge->m_dFilesNew.GetLength() );
	// a live writer means the previous file was never verified/closed
	if ( !bRestart && m_pWriter )
		return TlsMsg::Err ( "active writer %s (%d), next %s (%d)", GetFilename(), m_iFile, m_pMerge->m_dFilesNew[iFile].cstr(), iFile );
	sphLogDebugRpl ( "switching disk file %s (%d>%d), restart %d", m_pMerge->m_dFilesNew[iFile].cstr(), m_iFile, iFile, (int)bRestart );
	Close();
	m_bFileRestarted = bRestart;
	m_iFile = iFile;
	int iOpenFlags = ( bRestart ? ( O_CREAT | O_RDWR | SPH_O_BINARY ) : SPH_O_NEW ); // need to keep already written data
	auto pWriter = std::make_unique<WriterWithHash_c>();
	if ( !pWriter->OpenFile ( m_pMerge->m_dFilesNew[iFile], iOpenFlags, m_sError ) )
		return TlsMsg::Err ( m_sError );
	// reference reader is optional: the local file may simply not exist yet
	std::unique_ptr<CSphAutoreader> pReader;
	if ( sphFileExists ( m_pMerge->m_dFilesRef[iFile].cstr(), nullptr ) )
	{
		CSphString sError;
		pReader = std::make_unique<CSphAutoreader>();
		if ( !pReader->Open ( m_pMerge->m_dFilesRef[iFile], sError ) )
			return TlsMsg::Err ( sError );
	}
	// commit both handles only after every step above succeeded
	m_pWriter = std::move ( pWriter );
	m_pReader = std::move ( pReader );
	return true;
}
bool RecvState_c::CmdCopyFile ( int64_t iOff, int64_t iSize )
{
if ( !CheckFiles ( true, iOff ) )
return false;
while ( iSize > 0 )
{
const BYTE* pData = nullptr;
int64_t iRead = m_pReader->GetBytesZerocopy ( &pData, iSize );
if ( !iRead || m_pReader->GetErrorFlag() )
return TlsMsg::Err ( m_pReader->GetErrorMessage() );
m_pWriter->PutBytes ( pData, iRead );
if ( m_pWriter->IsError() )
return false;
iSize -= iRead;
}
return true;
}
// Copy iSize bytes from dBuf (starting at iOffBuf) into the destination file
// at offset iOffFile, rejecting slices that overrun the incoming buffer.
bool RecvState_c::CmdCopyBuffer ( int64_t iOffFile, const VecTraits_T<BYTE> & dBuf, int iOffBuf, int64_t iSize )
{
	if ( !CheckFiles ( false, iOffFile ) )
		return false;

	const bool bOutOfBounds = ( iOffBuf + iSize > dBuf.GetLength() );
	if ( bOutOfBounds )
		return TlsMsg::Err ( "out of bounds buffer slice (offset %d, size %d, buffer size %d) on buffer copy %s (%d)", iOffBuf, (int)iSize, dBuf.GetLength(), GetFilename(), m_iFile );

	m_pWriter->PutBytes ( dBuf.Begin() + iOffBuf, iSize );
	return !m_pWriter->IsError();
}
// Verify the destination file iFile against its expected hash.
// If the writer wrote the whole file from the start, the writer's running
// hash is used; if the write was restarted mid-file (or there is no writer),
// the file is re-read from disk and hashed.
WriteResult_e RecvState_c::CmdVerifyFile ( int iFile )
{
	// verifying a file other than the active one is a protocol error
	if ( iFile != m_iFile )
	{
		TlsMsg::Err ( "file mismatch, active writer %s (%d), verify %s (%d)", GetFilename(), m_iFile, m_pMerge->m_dFilesNew[iFile].cstr(), iFile );
		Close();
		return WriteResult_e::VERIFY_FAILED;
	}
	// writer writes whole file from the beginning - data will be verified from writer hash
	if ( m_pWriter && !m_bFileRestarted )
	{
		bool bVerifyOk = VerifyHashWriter ();
		Close();
		return ( bVerifyOk ? WriteResult_e::OK : WriteResult_e::VERIFY_FAILED );
	}
	// no writer or writer got restarted from offset
	// data should be verified from the disk file
	iFile = m_iFile; // Close will invalidate m_iFile
	sphLogDebugRpl ( "verify disk file %s (%d)", GetFilename(), iFile );
	Close();
	bool bVerifyOk = SyncSigVerify ( m_pMerge->m_dFilesNew[iFile], m_pMerge->m_dHashes[iFile] );
	return ( bVerifyOk ? WriteResult_e::OK : WriteResult_e::VERIFY_FAILED );
}
// Finalize the active writer and compare its running hash against the
// expected hash for the current file; reports a mismatch via TLS error.
bool RecvState_c::VerifyHashWriter ()
{
	assert ( m_pWriter );
	assert ( m_iFile >= 0 && m_iFile < m_pMerge->m_dFilesNew.GetLength() );

	// closing flushes pending data and finalizes the hash
	m_pWriter->CloseFile();
	if ( m_pWriter->IsError() )
		return TlsMsg::Err ( m_sError );

	auto tGot = m_pWriter->GetHASHBlob();
	const auto & tExpected = m_pMerge->m_dHashes[m_iFile];
	if ( tGot == tExpected )
		return true;
	return TlsMsg::Err ( "%s sha1 does not match, expected %s, got %s", m_pWriter->GetFilename().cstr(), BinToHex ( tExpected ).cstr(), BinToHex ( tGot ).cstr() );
}
// Drop the writer and reader handles and invalidate the active file index
// (-1 means "no file selected").
void RecvState_c::Close()
{
	m_pWriter = nullptr;
	m_pReader = nullptr;
	m_iFile = -1;
}
// Ensure writer (and, when bSeekReader, reader) are positioned at iOff before
// a copy operation. If the writer's position differs, the file is reopened in
// restart mode and both handles are re-seeked.
bool RecvState_c::CheckFiles ( bool bSeekReader, int64_t iOff )
{
	if ( !m_pWriter )
		return TlsMsg::Err ( "no active writer %p on data copy %s (%d)", m_pWriter.get(), GetFilename(), m_iFile );
	if ( bSeekReader && !m_pReader )
		return TlsMsg::Err ( "no reader %p on data copy %s (%d)", m_pReader.get(), GetFilename(), m_iFile );
	// a position mismatch means the donor restarted this file at an offset:
	// reopen keeping existing data, then seek the writer explicitly
	if ( m_pWriter->GetPos() != iOff )
	{
		sphLogDebugRpl ( "file %s (%d) restarted at offset: " INT64_FMT ", writer offset: " INT64_FMT ", reader offset:" INT64_FMT, GetFilename(), m_iFile, iOff, (int64_t)m_pWriter->GetPos(), (int64_t)( m_pReader.get() ? m_pReader->GetPos() : -1 ) );
		if ( !SetFile ( m_iFile, true ) )
			return false;
		// SetFile may legitimately leave the reader absent (no reference file)
		if ( bSeekReader && !m_pReader )
			return TlsMsg::Err ( "no reader %p on data copy %s (%d)", m_pReader.get(), GetFilename(), m_iFile );
		m_pWriter->SeekTo ( iOff, false );
	}
	if ( bSeekReader && m_pReader->GetPos() != iOff )
		m_pReader->SeekTo ( iOff, 0 );
	return true;
}
// Coroutine-mutex-guarded map of per-writer receive states, keyed by the
// 64-bit writer key.
class StatesCache_c
{
	CSphOrderedHash<RecvState_c, uint64_t, IdentityHash_fn, 64> m_hStates GUARDED_BY ( m_tLock );
	Threads::Coro::Mutex_c m_tLock;

public:
	// fetch the state for tWriterKey, creating it on first use
	RecvState_c & GetState ( uint64_t tWriterKey )
	{
		Threads::ScopedCoroMutex_t tLock ( m_tLock );
		if ( RecvState_c * pFound = m_hStates ( tWriterKey ) )
			return *pFound;
		return m_hStates.AddUnique ( tWriterKey );
	}

	void Free ( uint64_t tWriterKey )
	{
		Threads::ScopedCoroMutex_t tLock ( m_tLock );
		m_hStates.Delete ( tWriterKey );
	}

	bool HasState ( uint64_t tWriterKey )
	{
		Threads::ScopedCoroMutex_t tLock ( m_tLock );
		return m_hStates.Exists ( tWriterKey );
	}
};
// process-wide cache of per-writer receive states
static StatesCache_c g_tRecvStates;
// Module-level wrappers over the global cache.
RecvState_c& RecvState::GetState ( uint64_t tWriterKey )
{
	return g_tRecvStates.GetState ( tWriterKey );
}
void RecvState::Free ( uint64_t tWriterKey )
{
	g_tRecvStates.Free ( tWriterKey );
}
bool RecvState::HasState ( uint64_t tWriterKey )
{
	return g_tRecvStates.HasState ( tWriterKey );
}
| 8,247
|
C++
|
.cpp
| 218
| 35.5
| 244
| 0.69472
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
17,004
|
commit_monitor.cpp
|
manticoresoftware_manticoresearch/src/replication/commit_monitor.cpp
|
//
// Copyright (c) 2019-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org
//
#include "commit_monitor.h"
#include "tracer.h"
#include "searchdreplication.h"
// commit for common commands
// Commits the accumulator. When the accum carries no replication commands,
// this is a plain index commit; otherwise the first command decides the
// path, and TRUNCATE requires a write-locked index.
bool CommitMonitor_c::Commit ()
{
	TRACE_CONN ( "conn", "CommitMonitor_c::Commit" );
	using namespace TlsMsg;
	RtIndex_i * pIndex = m_tAcc.GetIndex ();
	// short path for usual accum without commands
	if ( m_tAcc.m_dCmd.IsEmpty ())
		return pIndex && pIndex->Commit ( m_pDeletedCount, &m_tAcc );
	ReplicationCommand_t& tCmd = *m_tAcc.m_dCmd[0];
	bool bTruncate = tCmd.m_eCommand == ReplCmd_e::TRUNCATE;
	bool bOnlyTruncate = bTruncate && ( m_tAcc.m_dCmd.GetLength() == 1 );
	// process with index from accum (no need to lock/unlock it)
	if ( pIndex )
		return CommitNonEmptyCmds ( pIndex, tCmd, bOnlyTruncate );
	// no index in the accum: look it up by name and lock appropriately
	auto pServed = GetServed ( tCmd.m_sIndex );
	if ( !pServed )
		return Err ( "requires an existing table" );
	// truncate needs wlocked index
	if ( ServedDesc_t::IsMutable ( pServed ) )
		return bTruncate
			? CommitNonEmptyCmds ( WIdx_T<RtIndex_i*> ( pServed ), tCmd, bOnlyTruncate )
			: CommitNonEmptyCmds ( RIdx_T<RtIndex_i*> ( pServed ), tCmd, bOnlyTruncate );
	return Err ( "requires an existing RT or percolate table" );
}
// Commit an accumulator that carries replication commands.
// A mixed stream goes through the regular commit; a lone TRUNCATE wipes the
// index and, if the command carries settings, reconfigures it afterwards
// (presumably TRUNCATE ... WITH RECONFIGURE — confirm against callers).
bool CommitMonitor_c::CommitNonEmptyCmds ( RtIndex_i * pIndex, const ReplicationCommand_t & tCmd, bool bOnlyTruncate ) const
{
	TLS_MSG_STRING ( sError );
	assert ( pIndex );
	if ( !bOnlyTruncate )
		return pIndex->Commit ( m_pDeletedCount, &m_tAcc, &sError );
	if ( !pIndex->Truncate ( sError, RtIndex_i::TRUNCATE ))
		return false;
	if ( !tCmd.m_tReconfigure )
		return true;
	CSphReconfigureSetup tSetup;
	StrVec_t dWarnings;
	bool bSame = pIndex->IsSameSettings ( *tCmd.m_tReconfigure, tSetup, dWarnings, sError );
	// short-circuit: Reconfigure() runs only when settings differ AND the
	// comparison set no error; the trailing sError check reports any error
	// left behind by either step
	return ( bSame || !sError.IsEmpty() || pIndex->Reconfigure ( tSetup ) ) && sError.IsEmpty();
}
// commit for Total Order Isolation commands
// Only cluster ALTER ADD / ALTER DROP are valid here; both funnel into the
// same cluster-TOI handler. Anything else is reported as an unknown command.
bool CommitMonitor_c::CommitTOI()
{
	const ReplicationCommand_t & tCmd = *m_tAcc.m_dCmd[0];
	if ( tCmd.m_eCommand!=ReplCmd_e::CLUSTER_ALTER_ADD && tCmd.m_eCommand!=ReplCmd_e::CLUSTER_ALTER_DROP )
		return TlsMsg::Err ( "unknown command '%d'", (int) tCmd.m_eCommand );

	bool bOk = SetIndexesClusterTOI ( &tCmd );
	sphLogDebugRpl ( "CommitTOI %s for '%s'; %s", ( bOk ? "finished" : "failed" ), tCmd.m_sCluster.cstr(), ( bOk ? "" : TlsMsg::szError() ) );
	return bOk;
}
// Apply an attribute update through either the API path or the extended
// MySQL path; bNeedWlock selects write-locked vs read-locked index access,
// and iUpdated accumulates the affected-row count.
static bool DoUpdate ( AttrUpdateArgs& tUpd, const cServedIndexRefPtr_c& pDesc, int& iUpdated, bool bUpdateAPI, bool bNeedWlock )
{
	TRACE_CORO ( "rt", "commit_monitor::DoUpdate" );
	if ( bUpdateAPI )
	{
		// Debug(...) presumably emits the "bool bOk =" capture only in debug
		// builds, so release builds invoke the lambda purely for its side
		// effects — macro defined elsewhere, confirm
		Debug ( bool bOk = ) [&]() {
			return bNeedWlock
				? HandleUpdateAPI ( tUpd, WIdx_c ( pDesc ), iUpdated )
				: HandleUpdateAPI ( tUpd, RWIdx_c ( pDesc ), iUpdated );
		}();
		assert ( bOk ); // fixme! handle this
		return ( iUpdated >= 0 );
	}
	HandleMySqlExtendedUpdate ( tUpd, pDesc, iUpdated, bNeedWlock );
	// success is signaled by an empty error string; only then count the rows
	if ( tUpd.m_pError->IsEmpty() )
		iUpdated += tUpd.m_iAffected;
	return ( tUpd.m_pError->IsEmpty() );
}
// Apply a TOI update command (UPDATE_API / UPDATE_QL / UPDATE_JSON) from the
// accumulator's first command against the named served index.
bool CommitMonitor_c::UpdateTOI ()
{
	TRACE_CORO ( "rt", "commit_monitor::UpdateTOI" );
	using namespace TlsMsg;
	if ( m_tAcc.m_dCmd.IsEmpty ())
		return TlsMsg::Err ( "empty accumulator" );
	const ReplicationCommand_t & tCmd = *m_tAcc.m_dCmd[0];
	cServedIndexRefPtr_c pServed { GetServed ( tCmd.m_sIndex ) };
	if ( !pServed )
		return TlsMsg::Err ( "requires an existing table" );
	assert ( m_pUpdated );
	assert ( m_pWarning );
	assert ( tCmd.m_pUpdateAPI );
	TLS_MSG_STRING ( sError );
	// marshal the command fields into the shared update-arguments struct
	AttrUpdateArgs tUpd;
	tUpd.m_pUpdate = tCmd.m_pUpdateAPI;
	tUpd.m_pError = &sError;
	tUpd.m_pWarning = m_pWarning;
	tUpd.m_pQuery = tCmd.m_pUpdateCond;
	tUpd.m_pIndexName = &tCmd.m_sIndex;
	tUpd.m_bJson = ( tCmd.m_eCommand == ReplCmd_e::UPDATE_JSON );
	bool bUpdateAPI = ( tCmd.m_eCommand == ReplCmd_e::UPDATE_API );
	// non-API updates must carry a condition
	assert ( bUpdateAPI || tCmd.m_pUpdateCond );
	return DoUpdate ( tUpd, pServed, *m_pUpdated, bUpdateAPI, tCmd.m_bBlobUpdate );
}
// Accumulator cleanup is tied to the monitor's lifetime: whatever remains
// uncommitted is discarded here.
CommitMonitor_c::~CommitMonitor_c ()
{
	m_tAcc.Cleanup ();
}
| 4,429
|
C++
|
.cpp
| 118
| 35.288136
| 140
| 0.707049
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
17,005
|
replicate_index.cpp
|
manticoresoftware_manticoresearch/src/replication/replicate_index.cpp
|
//
// Copyright (c) 2019-2024, Manticore Software LTD (http://manticoresearch.com)
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "replicate_index.h"
#include "std/string.h"
#include "std/vector.h"
#include "fileio.h"
#include "api_command_cluster.h"
#include "configuration.h"
#include "cluster_file_reserve.h"
#include "cluster_file_send.h"
#include "cluster_index_add_local.h"
#include <cmath>
// for send dist indexes
#include "nodes.h"
// RAII guard that re-enables saving on a served index when the guard goes out
// of scope (or earlier, via an explicit EnableSave() call).
class IndexSaveGuard_c: public ISphNoncopyable
{
	cServedIndexRefPtr_c m_pServedIndex;

public:
	explicit IndexSaveGuard_c ( cServedIndexRefPtr_c pIndexDesc )
		: m_pServedIndex ( std::move ( pIndexDesc ) )
	{}

	// re-enable saving and drop the reference; safe to call more than once
	void EnableSave()
	{
		if ( !m_pServedIndex )
			return;
		RIdx_T<RtIndex_i*> ( m_pServedIndex )->EnableSave();
		m_pServedIndex = nullptr;
	}

	~IndexSaveGuard_c()
	{
		EnableSave();
	}
};
static bool ActivateIndexOnRemotes ( const CSphString& sCluster, const CSphString& sIndex, IndexType_e eType, bool bSendOk, const VecTraits_T<const AgentDesc_t*>& dActivateIndexes, int64_t tmLongOpTimeout )
{
// send a command to activate transferred index
ClusterIndexAddLocalRequest_t tAddLocal;
tAddLocal.m_sCluster = sCluster;
tAddLocal.m_sIndex = sIndex;
tAddLocal.m_eIndex = eType;
tAddLocal.m_bSendFilesSuccess = bSendOk;
VecRefPtrs_t<AgentConn_t*> dNodes;
dNodes.Resize ( dActivateIndexes.GetLength() );
ARRAY_FOREACH ( i, dActivateIndexes )
{
const AgentDesc_t& tDesc = *dActivateIndexes[i];
dNodes[i] = ClusterIndexAddLocal_c::CreateAgent ( tDesc, ReplicationTimeoutQuery(), tAddLocal );
}
sphLogDebugRpl ( "sent table '%s' %s to %d nodes with timeout %d.%03d sec", sIndex.cstr(), ( bSendOk ? "loading" : "rollback" ), dNodes.GetLength(), (int)( tmLongOpTimeout / 1000 ), (int)( tmLongOpTimeout % 1000 ) );
ClusterIndexAddLocal_c tReq;
if ( !PerformRemoteTasksWrap ( dNodes, tReq, tReq, true ) )
return false;
sphLogDebugRpl ( "remote table '%s' %s", sIndex.cstr(), ( bSendOk ? "added" : "rolled-back" ) );
return true;
}
// Compute SHA1 signatures for every index file: one whole-file hash plus one
// hash per fixed-size chunk (chunk layout comes from InitSyncSrc()). Also
// records the slowest single-file time in m_tmTimeoutFile and the total time
// (capped at 5 minutes) in m_tmTimeout, to size later operation timeouts.
bool SyncSrc_t::CalculateFilesSignatures()
{
	int64_t tmStart = sphMicroTimer();
	TLS_MSG_STRING ( sError );
	// InitSyncSrc sizes the chunk tables and returns the largest chunk size,
	// which bounds the read buffer below
	auto iMaxChunkBytes = InitSyncSrc();
	if ( !iMaxChunkBytes.has_value() )
		return false;
	SHA1_c tHashFile;
	SHA1_c tHashChunk;
	const int iFiles = m_dIndexFiles.GetLength();
	CSphFixedVector<BYTE> dReadBuf { iMaxChunkBytes.value() };
	for ( int iFile = 0; iFile < iFiles; ++iFile )
	{
		int64_t tmStartFile = sphMicroTimer();
		const CSphString& sFile = m_dIndexFiles[iFile];
		const FileChunks_t& tChunk = m_dChunks[iFile];
		CSphAutofile tIndexFile;
		if ( tIndexFile.Open ( sFile, SPH_O_READ, sError ) < 0 )
			return false;
		tHashFile.Init();
		int iChunk = 0;
		int64_t iReadTotal = 0;
		// read the file chunk by chunk; each read feeds both the per-chunk
		// hash and the running whole-file hash
		while ( iReadTotal < tChunk.m_iFileSize )
		{
			int64_t iLeftTotal = tChunk.m_iFileSize - iReadTotal;
			int64_t iLeft = Min ( iLeftTotal, tChunk.m_iChunkBytes );
			iReadTotal += iLeft;
			if ( !tIndexFile.Read ( dReadBuf.Begin(), iLeft, sError ) )
				return false;
			// update whole file hash
			tHashFile.Update ( dReadBuf.Begin(), iLeft );
			// update and flush chunk hash
			tHashChunk.Init();
			tHashChunk.Update ( dReadBuf.Begin(), iLeft );
			tHashChunk.Final ( GetChunkHash ( iFile, iChunk ) );
			++iChunk;
		}
		tIndexFile.Close();
		tHashFile.Final ( GetFileHash ( iFile ) );
		// track the slowest file for per-file timeout sizing
		int64_t tmDeltaFile = ( sphMicroTimer() - tmStartFile ) / 1000;
		m_tmTimeoutFile = Max ( tmDeltaFile, m_tmTimeoutFile );
	}
	m_tmTimeout = Min ( ( sphMicroTimer() - tmStart ) / 1000, 300000 ); // long operation timeout but at least 5 minutes
	return true;
}
// Send a local index to remote cluster nodes via the API.
// Sequence: freeze local saves, hash the index files, reserve space on each
// remote, send only the files whose hashes differ, then ask remotes to
// activate (or roll back) the index.
bool ReplicateIndexToNodes ( const CSphString& sCluster, const CSphString& sIndex, const VecTraits_T<AgentDesc_t>& dDesc, const cServedIndexRefPtr_c& pServedIndex )
{
	assert ( !dDesc.IsEmpty ());
	StrVec_t dIndexFiles;
	// block background saves while we hash and transfer files; released below
	// via EnableSave() (or by the guard's destructor on any early return)
	IndexSaveGuard_c tIndexSaveGuard ( pServedIndex );
	RIdx_T<RtIndex_i*> pIndex { pServedIndex };
	// pin the current on-disk file set and collect the file names
	pIndex->LockFileState ( dIndexFiles );
	CSphString sIndexPath = pServedIndex->m_sIndexPath;
	IndexType_e eType = pServedIndex->m_eType;
	assert ( !sIndexPath.IsEmpty ());
	assert ( !dIndexFiles.IsEmpty ());
	sphLogDebugRpl ( "calculate sha1 of table files chunks '%s'", sIndex.cstr() );
	SyncSrc_t tSigSrc { std::move ( dIndexFiles ) };
	if ( !tSigSrc.CalculateFilesSignatures () )
		return false;
	sphLogDebugRpl ( "calculated sha1 of table '%s', files %d, hashes %d", sIndex.cstr(), tSigSrc.m_dIndexFiles.GetLength(), tSigSrc.m_dHashes.GetLength() );
	int64_t tmLongOpTimeout = ReplicationTimeoutQuery ( tSigSrc.m_tmTimeout * 3 ); // timeout = sha verify (of all index files) + preload (of all index files) +1 (for slow io)
	// ask every node to compare hashes and reserve space for mismatched files
	FileReserveRequest_t tRequest;
	tRequest.m_sCluster = sCluster;
	tRequest.m_sIndex = sIndex;
	tRequest.m_pChunks = &tSigSrc;
	tRequest.m_sIndexFileName = GetBaseName ( sIndexPath );
	auto dNodes = ClusterFileReserve_c::MakeAgents ( dDesc, tmLongOpTimeout, tRequest );
	assert ( dDesc.GetLength() == dNodes.GetLength() );
	auto bOk = SendClusterFileReserve ( dNodes );
	sphLogDebugRpl ( "reserved table '%s' - %s", sIndex.cstr(), ( bOk ? "ok" : "failed" ) );
	if ( !bOk )
		return false;
	CSphVector<RemoteFileState_t> dSendStates;
	CSphVector<const AgentDesc_t*> dActivateIndexes;
	// collect remote file states and make list nodes and files to send
	auto & sErr = TlsMsg::Err ();
	sErr.StartBlock ( "; " );
	ARRAY_FOREACH ( iNode, dNodes )
	{
		FileReserveReply_t & tRes = ClusterFileReserve_c::GetRes ( *dNodes[iNode] );
		const CSphBitvec & tFilesDstMask = tRes.m_dNodeChunksMask;
		// sanity check of the reply shape vs what we sent
		// NOTE(review): the && means a node where only ONE of the two counts
		// mismatches passes this check — confirm whether || was intended
		if ( tSigSrc.m_dBaseNames.GetLength() != tRes.m_dRemotePaths.GetLength() && tSigSrc.m_dHashes.GetLength() != tFilesDstMask.GetSize() )
		{
			sErr.Sprintf ( "'%s:%d' wrong stored files %d (expected %d), hashes %d (expected %d)",
				dNodes[iNode]->m_tDesc.m_sAddr.cstr (), dNodes[iNode]->m_tDesc.m_iPort,
				tRes.m_dRemotePaths.GetLength(), tSigSrc.m_dBaseNames.GetLength (),
				tRes.m_dNodeChunksMask.GetSize(), tSigSrc.m_dHashes.GetLength () );
			continue;
		}
		// fold remote timings into the timeout estimates
		tSigSrc.m_tmTimeout = Max ( tSigSrc.m_tmTimeout, tRes.m_tmTimeout );
		tRes.m_tmTimeoutFile = ReplicationTimeoutQuery ( Max ( tSigSrc.m_tmTimeoutFile, tRes.m_tmTimeoutFile ) * 3 );
		// whole-file hash bits live in the first GetLength(m_dBaseNames) positions of the mask
		bool bFilesMatched = true;
		for ( int iFile = 0; bFilesMatched && iFile < tSigSrc.m_dBaseNames.GetLength(); ++iFile )
			bFilesMatched &= tFilesDstMask.BitGet ( iFile );
		// node already has identical files AND a live index - nothing to do for it
		if ( bFilesMatched && tRes.m_bIndexActive )
			continue;
		RemoteFileState_t tRemoteState;
		tRemoteState.m_pAgentDesc = &dDesc[iNode];
		tRemoteState.m_pSyncSrc = &tSigSrc;
		tRemoteState.m_pSyncDst = &tRes;
		// no need to send index files to nodes there files matches exactly
		if ( !bFilesMatched )
			dSendStates.Add ( tRemoteState );
		// after file send need also to re-activate index with new files
		dActivateIndexes.Add ( &dDesc[iNode] );
	}
	sErr.FinishBlock ();
	// recalculate timeout after nodes reports
	tmLongOpTimeout = ReplicationTimeoutQuery ( tSigSrc.m_tmTimeout * 3 );
	// every node is already fully in sync - success
	if ( dSendStates.IsEmpty() && dActivateIndexes.IsEmpty() )
		return true;
	sphLogDebugRpl ( "sending table '%s'", sIndex.cstr() );
	bool bSendOk = true;
	if ( !dSendStates.IsEmpty() )
		bSendOk = RemoteClusterFileSend ( tSigSrc, dSendStates, sCluster, sIndex );
	// allow index local write operations passed without replicator
	tIndexSaveGuard.EnableSave ();
	// even on a failed send we still notify remotes so they can roll back
	return ActivateIndexOnRemotes ( sCluster, sIndex, eType, bSendOk, dActivateIndexes, tmLongOpTimeout ) && bSendOk;
}
struct DistIndexSendRequest_t : public ClusterRequest_t
{
DistIndexSendRequest_t() = default;
DistIndexSendRequest_t ( const DistributedIndex_t & tDistIndex, const CSphString & sCluster, const CSphString & sIndex )
{
m_sCluster = sCluster;
m_sIndex = sIndex;
JsonEscapedBuilder sDesc;
IndexDescDistr_t tDesc = GetDistributedDesc ( tDistIndex );
tDesc.Save ( sDesc );
sDesc.MoveTo ( m_sDesc );
}
CSphString m_sIndex;
CSphString m_sDesc;
};
// Wire serialization for DistIndexSendRequest_t.
// Field order here MUST match operator>> below exactly: cluster, index, desc.
void operator<< ( ISphOutputBuffer & tOut, const DistIndexSendRequest_t & tReq )
{
	tOut.SendString ( tReq.m_sCluster.cstr() );
	tOut.SendString ( tReq.m_sIndex.cstr() );
	tOut.SendString ( tReq.m_sDesc.cstr() );
}

// Wire deserialization; mirrors operator<< above.
void operator>> ( InputBuffer_c & tIn, DistIndexSendRequest_t & tReq )
{
	tReq.m_sCluster = tIn.GetString();
	tReq.m_sIndex = tIn.GetString();
	tReq.m_sDesc = tIn.GetString();
}
using ClusterSendDistIndex_c = ClusterCommand_T<E_CLUSTER::INDEX_ADD_DIST, DistIndexSendRequest_t>;
// send distributed index to remote nodes via API
bool ReplicateDistIndexToNodes ( const CSphString & sCluster, const CSphString & sIndex, const VecTraits_T<AgentDesc_t> & dDesc )
{
cDistributedIndexRefPtr_t pDist ( GetDistr ( sIndex ) );
if ( !pDist )
{
TlsMsg::Err() << "unknown or wrong type of table '" << sIndex << "'";
return false;
}
ClusterSendDistIndex_c tReq;
DistIndexSendRequest_t tSend ( *pDist, sCluster, sIndex );
int64_t tmTimeout = ReplicationTimeoutQuery();
auto dNodes = ClusterSendDistIndex_c::MakeAgents ( dDesc, tmTimeout, tSend );
sphLogDebugRpl ( "sending table '%s' to %d nodes with timeout %d.%03d sec", sIndex.cstr(), dNodes.GetLength(), (int)( tmTimeout / 1000 ), (int)( tmTimeout % 1000 ) );
return PerformRemoteTasksWrap ( dNodes, tReq, tReq, true );
}
// Installs the distributed index described by tCmd (received from a cluster
// peer) into the local distributed-index table. Parses the JSON description,
// builds the index config and replaces any existing same-named index.
// Returns false with TLS error on any validation/parse failure.
static bool AddDistIndex ( const DistIndexSendRequest_t & tCmd )
{
	TLS_MSG_STRING ( sError );
	// refuse to overwrite an index that already belongs to a(nother) cluster
	cDistributedIndexRefPtr_t pDist ( GetDistr ( tCmd.m_sIndex ) );
	if ( pDist && !pDist->m_sCluster.IsEmpty() )
		return TlsMsg::Err ( "distributed table '%s:%s' is already the part of the cluster %s, remove it first", tCmd.m_sCluster.cstr(), tCmd.m_sIndex.cstr(), pDist->m_sCluster.cstr() );

	// parse the JSON description into BSON
	CSphVector<BYTE> dBsonParsed;
	if ( !sphJsonParse ( dBsonParsed, (char *)tCmd.m_sDesc.cstr(), false, false, false, sError ) )
		return false;

	using namespace bson;
	Bson_c tBson ( dBsonParsed );
	if ( tBson.IsEmpty() || !tBson.IsAssoc() )
		return TlsMsg::Err ( "bad json for distributed table '%s:%s': %s", tCmd.m_sCluster.cstr(), tCmd.m_sIndex.cstr(), tCmd.m_sDesc.cstr() );

	CSphString sWarning;
	IndexDesc_t tIndexDesc;
	if ( !tIndexDesc.Parse ( tBson, tCmd.m_sIndex, sWarning ) )
		return false;
	if ( !sWarning.IsEmpty() )
		sphWarning ( "table '%s' create warning: %s", tCmd.m_sIndex.cstr(), sWarning.cstr() );

	// convert the parsed description into a config section and build the index
	CSphConfigSection hConf;
	tIndexDesc.Save ( hConf );
	StrVec_t dWarnings;
	DistributedIndexRefPtr_t pIdx ( new DistributedIndex_t );
	// the predicate accepts every local index name unconditionally here;
	// presumably validation happens when the index is used — TODO confirm
	bool bOk = ConfigureDistributedIndex ( []( const auto & sIdx ){ return true; }, *pIdx, tCmd.m_sIndex.cstr(), hConf, sError, &dWarnings );
	for ( const CSphString & sMsg : dWarnings )
		sphWarning ( "distributed table '%s:%s': %s", tCmd.m_sCluster.cstr(), tCmd.m_sIndex.cstr(), sMsg.cstr() );
	if ( !bOk || pIdx->IsEmpty() )
	{
		TlsMsg::Err ( "failed to create distributed table '%s:%s': %s", tCmd.m_sCluster.cstr(), tCmd.m_sIndex.cstr(), sError.cstr() );
		// sError's content is now folded into the TLS message; clear the local copy
		sError = "";
		return false;
	}

	// finally, check and add a new or replace an existed distributed index to global table
	g_pDistIndexes->AddOrReplace ( pIdx, tCmd.m_sIndex );
	return true;
}
/// Handler for the INDEX_ADD_DIST API command: parse the incoming distributed
/// index description, install it locally via AddDistIndex(), and ack on
/// success. When AddDistIndex() fails no reply is built, which lets the
/// transport report the pending TLS error to the sender.
/// (Fix: dropped the unused local `ClusterSendDistIndex_c tReq` — ParseRequest
/// and BuildReply are invoked through the type, no instance is needed.)
void ReceiveDistIndex ( ISphOutputBuffer & tOut, InputBuffer_c & tBuf, CSphString & sCluster )
{
	DistIndexSendRequest_t tDistCmd;
	ClusterSendDistIndex_c::ParseRequest ( tBuf, tDistCmd );
	sCluster = tDistCmd.m_sCluster;

	if ( AddDistIndex ( tDistCmd ) )
		ClusterSendDistIndex_c::BuildReply ( tOut );
}
| 11,629
|
C++
|
.cpp
| 272
| 40.257353
| 217
| 0.729078
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
17,006
|
send_files.cpp
|
manticoresoftware_manticoresearch/src/replication/send_files.cpp
|
//
// Copyright (c) 2023-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org
//
#include "send_files.h"
#include "searchdha.h"
#include <optional>
/// Number of fixed-size chunks needed to cover the whole file (0 for an empty file).
int FileChunks_t::GetChunksCount () const noexcept
{
	if ( !m_iFileSize )
		return 0;
	// round up: a partial tail still occupies its own chunk
	return int ( ( m_iFileSize + m_iChunkBytes - 1 ) / m_iChunkBytes );
}

/// Byte length of chunk iChunk: every chunk is m_iChunkBytes long except the
/// last one, which covers the file tail.
int64_t FileChunks_t::GetChunkFileLength ( int iChunk ) const noexcept
{
	const bool bLastChunk = ( iChunk == GetChunksCount() - 1 );
	if ( !bLastChunk )
		return m_iChunkBytes;
	return m_iFileSize - (int64_t)m_iChunkBytes * iChunk;
}

/// Byte offset of chunk iChunk from the start of the file.
int64_t FileChunks_t::GetChunkFileOffset ( int iChunk ) const noexcept
{
	return (int64_t)m_iChunkBytes * iChunk;
}
// Takes ownership of the list of full index file paths to be synchronized.
SyncSrc_t::SyncSrc_t ( StrVec_t&& dIndexFiles )
{
	m_dIndexFiles.SwapData ( dIndexFiles );
}

// Whole-file hash of file iFile. These occupy the first GetLength(files)
// slots of m_dHashes (see InitSyncSrc, which starts chunk hashes after them).
HASH20_t& SyncSrc_t::GetFileHash ( int iFile ) const noexcept
{
	assert ( iFile >= 0 && iFile < m_dBaseNames.GetLength() );
	return m_dHashes[iFile];
}

// Hash of chunk iChunk of file iFile; chunk hashes for a file start at its
// FileChunks_t::m_iHashStartItem offset within m_dHashes.
HASH20_t& SyncSrc_t::GetChunkHash ( int iFile, int iChunk ) const noexcept
{
	assert ( iFile >= 0 && iFile < m_dBaseNames.GetLength() );
	assert ( iChunk >= 0 && iChunk < m_dChunks[iFile].GetChunksCount() );
	return m_dHashes[m_dChunks[iFile].m_iHashStartItem + iChunk];
}
// rsync uses sqrt ( iSize ) but that make too small buffers
constexpr int iBlockMin = 2048;

// Prepares the per-file chunking layout: base names, chunk sizes and the hash
// array. Chunk size is ~sqrt(file size), clamped to [iBlockMin, min(fileSize,
// m_iBufferSize)]; m_iBufferSize is 3/4 of the max network packet so a chunk
// always fits into one packet. The hash array layout is: slots [0, iFiles)
// hold whole-file hashes, followed by each file's chunk hashes starting at
// its m_iHashStartItem. Returns the largest chunk size (used to size the
// shared read buffer), or nullopt with TLS error if any file fails to open.
std::optional<int> SyncSrc_t::InitSyncSrc ()
{
	TLS_MSG_STRING ( sError );
	const int iFiles = m_dIndexFiles.GetLength();
	m_dBaseNames.Reset ( iFiles );
	m_dChunks.Reset ( iFiles );
	int iMaxChunkBytes = 0;
	m_iBufferSize = (int64_t)g_iMaxPacketSize * 3 / 4;
	// first iFiles hash slots are reserved for whole-file hashes
	int iHashes = iFiles;
	for ( int i = 0; i < iFiles; ++i )
	{
		const CSphString& sFile = m_dIndexFiles[i];
		CSphAutofile tIndexFile;
		if ( tIndexFile.Open ( sFile, SPH_O_READ, sError ) < 0 )
			return std::nullopt;
		m_dBaseNames[i] = GetBaseName ( sFile );
		int64_t iFileSize = tIndexFile.GetSize();
		// int iChunkBytes = int ( iFileSize / iBlockMin ); // FIXME!!! sqrt ( iFileSize )
		// no need too small chunks
		int64_t iChunkBytes = Min (
			Min (
				Max ( iBlockMin, int64_t ( sqrt ( iFileSize ) ) ),
				iFileSize ),
			m_iBufferSize );
		// NOTE(review): a zero-length file yields iChunkBytes==0 and trips
		// this assert in debug builds — confirm empty index files cannot occur here
		assert ( iChunkBytes>0 && iChunkBytes<INT_MAX );
		FileChunks_t& tChunk = m_dChunks[i];
		tChunk.m_iFileSize = iFileSize;
		tChunk.m_iChunkBytes = (int)iChunkBytes;
		tChunk.m_iHashStartItem = iHashes;
		iHashes += tChunk.GetChunksCount();
		iMaxChunkBytes = Max ( tChunk.m_iChunkBytes, iMaxChunkBytes );
	}
	m_dHashes.Reset ( iHashes );
	m_dHashes.Fill ( {} );
	return iMaxChunkBytes;
}
// Hashes a local file chunk-by-chunk against the source signatures tSrc and
// marks matching parts in tDst, using the same bit layout as the hash array:
// bit (m_iHashStartItem + iChunk) per matching chunk, plus bit iFile when the
// whole-file hash matches. Returns false only on open/read errors; hash
// mismatches are reported through unset bits, not the return value.
bool VerifyFileHash ( int iFile, const CSphString& sName, const SyncSrc_t& tSrc, CSphBitvec& tDst, CSphVector<BYTE>& dBuf, CSphString& sError )
{
	const FileChunks_t& tChunk = tSrc.m_dChunks[iFile];
	SHA1_c tHashFile;
	SHA1_c tHashChunk;
	tHashFile.Init();

	// reuse the caller's scratch buffer, sized to this file's chunk length
	dBuf.Resize ( tChunk.m_iChunkBytes );
	BYTE* pReadData = dBuf.Begin();
	HASH20_t tFileHash {};
	HASH20_t tChunkHash {};
	CSphAutofile tIndexFile;
	if ( tIndexFile.Open ( sName, SPH_O_READ, sError ) < 0 )
		return false;

	int iChunk = 0;
	int64_t iReadTotal = 0;
	while ( iReadTotal < tChunk.m_iFileSize )
	{
		// the last chunk may be shorter than m_iChunkBytes
		int64_t iLeft = tChunk.m_iFileSize - iReadTotal;
		iLeft = Min ( iLeft, tChunk.m_iChunkBytes );
		iReadTotal += iLeft;
		if ( !tIndexFile.Read ( pReadData, iLeft, sError ) )
			return false;

		// update whole file hash
		tHashFile.Update ( pReadData, iLeft );

		// update and flush chunk hash
		tHashChunk.Init();
		tHashChunk.Update ( pReadData, iLeft );
		tHashChunk.Final ( tChunkHash );

		if ( tChunkHash == tSrc.GetChunkHash ( iFile, iChunk ) )
			tDst.BitSet ( tChunk.m_iHashStartItem + iChunk );
		++iChunk;
	}

	tHashFile.Final ( tFileHash );
	if ( tFileHash == tSrc.GetFileHash ( iFile ) )
		tDst.BitSet ( iFile );

	return true;
}
// Verifies a single file against its expected whole-file SHA1 hash.
// Returns false with TLS error on open failure or hash mismatch.
bool SyncSigVerify ( const CSphString& sFile, const HASH20_t& dHash )
{
	CSphAutoreader tIndexFile;
	{
		TLS_MSG_STRING ( sError );
		if ( !tIndexFile.Open ( sFile, sError ) )
			return false;
	}

	SHA1_c tHashFile;
	tHashFile.Init();
	const int64_t iFileSize = tIndexFile.GetFilesize();
	int64_t iReadTotal = 0;
	while ( iReadTotal < iFileSize )
	{
		const int64_t iLeft = iFileSize - iReadTotal;
		const BYTE * pData = nullptr;
		// zero-copy read straight from the reader's buffer
		// NOTE(review): if GetBytesZerocopy() ever returns 0 (e.g. on a read
		// error) this loop would spin forever — confirm the reader guarantees
		// progress or reports errors elsewhere
		const int64_t iGot = tIndexFile.GetBytesZerocopy ( &pData, iLeft );
		iReadTotal += iGot;

		// update whole file hash
		tHashFile.Update ( pData, iGot );
	}

	auto dNewHash = tHashFile.FinalHash();
	return dNewHash == dHash
		|| TlsMsg::Err ( "%s sha1 does not matched, expected %s, got %s", sFile.cstr(), BinToHex ( dHash ).cstr(), BinToHex ( dNewHash ).cstr() );
}
bool SyncSigVerify ( const VecTraits_T<CSphString>& dFiles, const VecTraits_T<HASH20_t>& dHashes )
{
ARRAY_FOREACH ( iFile, dFiles )
{
if ( !SyncSigVerify ( dFiles[iFile], dHashes[iFile] ) )
return false;
}
return true;
}
| 5,197
|
C++
|
.cpp
| 156
| 30.955128
| 143
| 0.711942
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
17,007
|
cluster_get_nodes.cpp
|
manticoresoftware_manticoresearch/src/replication/cluster_get_nodes.cpp
|
//
// Copyright (c) 2019-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org
//
#include "cluster_get_nodes.h"
#include "cluster_commands.h"
#include "api_command_cluster.h"
#include "nodes.h"
#include "searchdreplication.h"
#include "serialize.h"
// API command to remote node to get nodes it sees
using ClusterGetNodes_c = ClusterCommand_T<E_CLUSTER::GET_NODES, ClusterRequest_t, StrVec_t>;

// Queries every agent for the node list it sees and returns the union of all
// replies. Must wait for ALL replies (no early break) so the merged list is
// complete; agent failures simply contribute nothing.
StrVec_t RemoteClusterGetNodes ( VectorAgentConn_t & dAgents )
{
	// set keyed by node address; used to deduplicate across replies
	sph::StringSet hAllNodes;

	// submit initial jobs
	CSphRefcountedPtr<RemoteAgentsObserver_i> tReporter ( GetObserver ());
	ClusterGetNodes_c tReq;
	ScheduleDistrJobs ( dAgents, &tReq, &tReq, tReporter, ReplicationRetryCount(), ReplicationRetryDelay() );

	bool bDone = false;
	while (!bDone)
	{
		// don't forget to check incoming replies after send was over
		bDone = tReporter->IsDone ();
		// wait one or more remote queries to complete
		if ( !bDone )
			tReporter->WaitChanges ();

		for ( AgentConn_t * pAgent : dAgents )
		{
			if ( !pAgent->m_bSuccess )
				continue;
			pAgent->m_bSuccess = false; // re-use !m_bSuccess as 'visited' flag

			// need to wait all replies in case any node get nodes list and merge these lists
			// also need a way for distributed loop to finish as it can not break early
			StrVec_t & dNodes = ClusterGetNodes_c::GetRes ( *pAgent );
			for_each ( dNodes, [&hAllNodes] ( const CSphString& sNode ) { hAllNodes.Add ( sNode ); } );
		}
	}

	// flatten the merged set into a plain vector
	StrVec_t dRes;
	for_each ( hAllNodes, [&dRes] ( const auto& sNode ) { dRes.Add ( sNode.first ); } );
	return dRes;
}
// command to all remote nodes at cluster to get actual nodes list
// Resolves dClusterNodes, asks each of them for the node list it sees and
// returns the merged result. Per-agent failures are logged AND appended to
// the TLS error (';'-joined) but do not fail the call by themselves.
StrVec_t QueryNodeListFromRemotes ( const VecTraits_T<CSphString>& dClusterNodes, const CSphString& sCluster )
{
	StrVec_t dNodes;
	TlsMsg::ResetErr();

	// resolve node addresses; QUICK avoids slow DNS retries here
	VecAgentDesc_t dDesc = GetDescAPINodes ( dClusterNodes, Resolve_e::QUICK );
	if ( dDesc.IsEmpty() )
	{
		if ( TlsMsg::HasErr() )
			TlsMsg::Err ( "%s invalid node, error: %s", StrVec2Str ( dClusterNodes ).cstr(), TlsMsg::szError() );
		else
			TlsMsg::Err ( "%s invalid node", StrVec2Str ( dClusterNodes ).cstr() );
		return dNodes;
	}

	ClusterRequest_t dRequest;
	dRequest.m_sCluster = sCluster;

	VecRefPtrs_t<AgentConn_t*> dAgents = ClusterGetNodes_c::MakeAgents ( dDesc, ReplicationTimeoutAnyNode(), dRequest );
	dNodes = RemoteClusterGetNodes ( dAgents );

	// collect per-agent failures into one ';'-separated TLS error
	ScopedComma_c tColon ( TlsMsg::Err(), ";" );
	for ( const AgentConn_t* pAgent : dAgents )
	{
		if ( !pAgent->m_sFailure.IsEmpty() )
		{
			StringBuilder_c sOneError;
			sOneError.Sprintf ( "'%s:%d': %s", pAgent->m_tDesc.m_sAddr.cstr(), pAgent->m_tDesc.m_iPort, pAgent->m_sFailure.cstr() );
			sphWarning ( "%s", sOneError.cstr() );
			TlsMsg::Err() << sOneError;
		}
	}
	return dNodes;
}
/// Handler for the GET_NODES API command: reply with the full node list of
/// the requested cluster. No reply is built when an error is pending, which
/// signals the failure to the caller.
void ReceiveClusterGetNodes ( ISphOutputBuffer& tOut, InputBuffer_c& tBuf, CSphString& sCluster )
{
	ClusterRequest_t tRequest;
	ClusterGetNodes_c::ParseRequest ( tBuf, tRequest );
	sCluster = tRequest.m_sCluster;

	auto dAllNodes = ClusterGetAllNodes ( tRequest.m_sCluster );
	if ( !TlsMsg::HasErr() )
		ClusterGetNodes_c::BuildReply ( tOut, dAllNodes );
}
// Queries the cluster's configured nodes for the actual node list.
// NOTE(review): the TLS error is set on BOTH branches — including the
// non-empty (apparently successful) result — confirm that callers treat the
// message as informational when dNodes is non-empty.
StrVec_t GetNodeListFromRemotes ( const ClusterDesc_t& tDesc )
{
	auto dNodes = QueryNodeListFromRemotes ( tDesc.m_dClusterNodes, tDesc.m_sName );
	if ( dNodes.IsEmpty() )
		TlsMsg::Err ( "cluster '%s', no nodes available(%s), error: %s", tDesc.m_sName.cstr(), StrVec2Str( tDesc.m_dClusterNodes ).cstr(), TlsMsg::szError() );
	else
		TlsMsg::Err ( "cluster '%s', invalid nodes '%s'(%s), error: %s", tDesc.m_sName.cstr(), StrVec2Str ( dNodes ).cstr(), StrVec2Str ( tDesc.m_dClusterNodes ).cstr(), TlsMsg::szError() );
	return dNodes;
}
/////////////////////////////////////////////////////////////////////////////
// cluster get nodes state
/////////////////////////////////////////////////////////////////////////////

// Dual-purpose message for node-state exchange:
// - replying node fills m_tState with its own state (operator<< sends it);
// - querying node accumulates parsed replies in m_dStates (operator>> appends).
struct ClusterNodesStatesReply_t : public ClusterRequest_t
{
	ClusterNodesStatesVec_t m_dStates;
	RemoteNodeClusterState_t m_tState;
};

// API command to remote node to get node state
using ClusterNodeState_c = ClusterCommand_T<E_CLUSTER::GET_NODE_STATE, ClusterRequest_t, ClusterNodesStatesReply_t>;

// Wire serialization: base request fields, then state, node name, hash.
// Field order MUST match operator>> below.
void operator<< ( ISphOutputBuffer & tOut, const ClusterNodesStatesReply_t & tReq )
{
	tOut << (const ClusterRequest_t&)tReq;
	tOut.SendByte ( (BYTE)tReq.m_tState.m_eState );
	tOut.SendString ( tReq.m_tState.m_sNode.cstr() );
	tOut.SendString ( tReq.m_tState.m_sHash.cstr() );
}

// Wire deserialization; appends the received state to m_dStates so multiple
// replies can be merged into one vector.
void operator>> ( InputBuffer_c & tIn, ClusterNodesStatesReply_t & tReq )
{
	tIn >> (ClusterRequest_t&)tReq;
	RemoteNodeClusterState_t & tState = tReq.m_dStates.Add();
	tState.m_eState = (ClusterState_e)tIn.GetByte();
	tState.m_sNode = tIn.GetString();
	tState.m_sHash = tIn.GetString();
}
// Asks every resolvable node in dNodes for its cluster state and appends the
// successful replies to dStates. Unreachable/failed nodes are silently
// skipped (see FIXME below); the function currently always returns true.
static bool SendClusterNodesStates ( const CSphString & sCluster, const VecTraits_T<CSphString> & dNodes, ClusterNodesStatesVec_t & dStates )
{
	ClusterNodeState_c::REQUEST_T tReq;
	tReq.m_sCluster = sCluster;
	auto dAgents = ClusterNodeState_c::MakeAgents ( GetDescAPINodes ( dNodes, Resolve_e::SLOW ), ReplicationTimeoutQuery(), tReq );
	// no nodes left seems a valid case
	if ( dAgents.IsEmpty() )
		return true;

	ClusterNodeState_c tReply;
	// FIXME!!! handle errors
	PerformRemoteTasksWrap ( dAgents, tReply, tReply, true );

	// collect states only from agents that answered successfully
	for ( const AgentConn_t * pAgent : dAgents )
	{
		if ( pAgent->m_bSuccess )
			dStates.Append ( ClusterNodeState_c::GetRes ( *pAgent ).m_dStates );
	}
	return true;
}
/// Collect node-state replies from every configured node of the cluster.
ClusterNodesStatesVec_t GetStatesFromRemotes ( const ClusterDesc_t & tDesc )
{
	ClusterNodesStatesVec_t dResult;
	SendClusterNodesStates ( tDesc.m_sName, tDesc.m_dClusterNodes, dResult );
	return dResult;
}

/// Handler for the GET_NODE_STATE API command: fill in this node's state for
/// the requested cluster and send it back.
/// NOTE(review): unlike sibling handlers, the sCluster out-parameter is left
/// unassigned here — confirm whether the caller needs it.
void ReceiveClusterGetState ( ISphOutputBuffer & tOut, InputBuffer_c & tBuf, CSphString & sCluster )
{
	ClusterNodesStatesReply_t tCmd;
	ClusterNodeState_c::ParseRequest ( tBuf, tCmd );
	ClusterGetState ( tCmd.m_sCluster, tCmd.m_tState );
	ClusterNodeState_c::BuildReply ( tOut, tCmd );
}
// Version handshake payload: the cluster command version and the replication
// command version a node speaks.
struct ClusterNodeVerReply_t
{
	WORD m_uVerCommandCluster = 0;
	WORD m_uVerCommandReplicate = 0;
};

// version of this handshake message itself (sent first on the wire)
static const WORD g_uClusterNodeVer = 1;

// API command to remote node to get node versions
using ClusterNodeVer_c = ClusterCommand_T<E_CLUSTER::GET_NODE_VER, ClusterNodeVerReply_t, ClusterNodeVerReply_t>;

// Wire serialization: message version first, then the two command versions.
// Field order MUST match operator>> below.
void operator<< ( ISphOutputBuffer & tOut, const ClusterNodeVerReply_t & tReq )
{
	tOut.SendWord ( g_uClusterNodeVer );
	tOut.SendWord ( tReq.m_uVerCommandCluster );
	tOut.SendWord ( tReq.m_uVerCommandReplicate );
}

// Wire deserialization; a message older than g_uClusterNodeVer leaves both
// version fields at 0, which later fails the version comparison.
void operator>> ( InputBuffer_c & tIn, ClusterNodeVerReply_t & tReq )
{
	WORD uVer = tIn.GetWord();
	if ( uVer>=g_uClusterNodeVer )
	{
		tReq.m_uVerCommandCluster = tIn.GetWord();
		tReq.m_uVerCommandReplicate = tIn.GetWord();
	}
}
/// Handler for the GET_NODE_VER API command: report this node's cluster and
/// replication command versions to the asking peer.
void ReceiveClusterGetVer ( ISphOutputBuffer & tOut )
{
	ClusterNodeVerReply_t tVersions;
	tVersions.m_uVerCommandCluster = VER_COMMAND_CLUSTER;
	tVersions.m_uVerCommandReplicate = GetVerCommandReplicate();
	ClusterNodeVer_c::BuildReply ( tOut, tVersions );
}
// Verifies that every reachable node of the cluster speaks the same cluster
// and replication command versions as this node. Returns false (with TLS
// error) on any version mismatch; unreachable nodes are tolerated.
bool CheckRemotesVersions ( const ClusterDesc_t & tDesc )
{
	ClusterNodeVer_c::REQUEST_T tReq;
	auto dAgents = ClusterNodeVer_c::MakeAgents ( GetDescAPINodes ( tDesc.m_dClusterNodes, Resolve_e::QUICK ), ReplicationTimeoutAnyNode(), tReq );
	// no nodes left seems a valid case
	if ( dAgents.IsEmpty() )
		return true;

	ClusterNodeVer_c tReply;
	PerformRemoteTasksWrap ( dAgents, tReply, tReply, false );

	for ( const AgentConn_t * pAgent : dAgents )
	{
		// failure if:
		// - fetched but versions are wrong either VER_COMMAND_CLUSTER or VER_COMMAND_REPLICATE
		// - get remote node error reply with the wrong version message
		if ( pAgent->m_bSuccess )
		{
			ClusterNodeVerReply_t tVer = ClusterNodeVer_c::GetRes ( *pAgent );
			if ( tVer.m_uVerCommandCluster!=VER_COMMAND_CLUSTER || tVer.m_uVerCommandReplicate!=GetVerCommandReplicate() )
			{
				TlsMsg::Err ( "versions mismatch, node removed from the cluster '%s': %d(%d), replication: %d(%d)", tDesc.m_sName.cstr(), (int)tVer.m_uVerCommandCluster, (int)VER_COMMAND_CLUSTER, (int)tVer.m_uVerCommandReplicate, (int)GetVerCommandReplicate() );
				return false;
			}
		} else if ( pAgent->m_sFailure.Begins ( "remote error: client version is" ) )
		{
			// a node rejected us outright for protocol-version reasons;
			// propagate its message as our own error
			// NOTE(review): m_sFailure is passed to Err() directly — confirm
			// the CSphString overload is picked so '%' in the message is safe
			TlsMsg::ResetErr();
			TlsMsg::Err ( pAgent->m_sFailure );
			return false;
		}
	}
	return true;
}
| 8,605
|
C++
|
.cpp
| 215
| 37.790698
| 250
| 0.724931
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
17,008
|
wsrep_v31.cpp
|
manticoresoftware_manticoresearch/src/replication/wsrep_v31.cpp
|
//
// Copyright (c) 2024, Manticore Software LTD (https://manticoresearch.com)
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org
//
#include "wsrep_cxx_int.h"
namespace RAW31
{
static const char* INTERFACE_VERSION = "31";
/// wsrep provider status codes; the numeric values are part of the v31
/// provider ABI — do not renumber.
enum class Status_e : int {
	OK = 0,
	WARNING, // minor warning
	TRX_MISSED,
	TRX_FAIL,
	BRUTEFORCE_ABORT,
	SIZE_EXCEEDED,
	CONNECTION_FAIL, // must abort
	NODE_FAIL, // must reinit
	FATAL, // must abort
	PRECOMMIT_ABORT,
	NOT_IMPL, // not implemented
};

/// Human-readable description of a provider status. Unknown values fall back
/// to the C library's strerror() on the raw integer, same as before.
inline const char* GetStatus ( Status_e eStatus ) noexcept
{
	// table indexed by the enum's contiguous underlying values 0..10
	static const char* dKnown[] = {
		"success",                                              // OK
		"warning",                                              // WARNING
		"transaction is not known",                             // TRX_MISSED
		"transaction aborted, server can continue",             // TRX_FAIL
		"transaction was victim of brute force abort",          // BRUTEFORCE_ABORT
		"data exceeded maximum supported size",                 // SIZE_EXCEEDED
		"error in client connection, must abort",               // CONNECTION_FAIL
		"error in node state, must reinit",                     // NODE_FAIL
		"fatal error, server must abort",                       // FATAL
		"transaction was aborted before commencing pre-commit", // PRECOMMIT_ABORT
		"feature not implemented",                              // NOT_IMPL
	};
	const int iCode = static_cast<int> ( eStatus );
	if ( iCode >= 0 && iCode < (int)( sizeof ( dKnown ) / sizeof ( dKnown[0] ) ) )
		return dKnown[iCode];
	return strerror ( iCode );
}
// types from wsrep internals
struct Wsrep_t;
struct NodeInfo_t;

// Function-pointer aliases for the v31 provider entry points.
// NOTE(review): these signatures must match the provider's C ABI exactly
// (wsrep_api.h, interface version 31) — verify against the header before
// changing any of them.

// init/deinit
using FnInit = Status_e ( * ) ( Wsrep_t*, const InitArgs_t* );
using FnFree = void ( * ) ( Wsrep_t* );
using FnCapabilities = uint64_t ( * ) ( Wsrep_t* );

// options
using FnOptionsSet = Status_e ( * ) ( Wsrep_t*, const char* );
using FnOptionsGet = char* (*)( Wsrep_t* );

// connect
using FnConnect = Status_e ( * ) ( Wsrep_t*, const char*, const char*, const char*, bool );
using FnDisconnect = Status_e ( * ) ( Wsrep_t* );

// replication
using FnRecv = Status_e ( * ) ( Wsrep_t*, void* );
using FnReplicate = Status_e ( * ) ( Wsrep_t*, uint64_t, TxHandle_t*, DWORD, Wsrep::TrxMeta_t* );
using FnPreCommit = Status_e ( * ) ( Wsrep_t*, uint64_t, TxHandle_t*, DWORD, Wsrep::TrxMeta_t* );
using FnReplicatePreCommit = Status_e ( * ) ( Wsrep_t*, uint64_t, TxHandle_t*, DWORD, Wsrep::TrxMeta_t* );
using FnInterimCommit = Status_e ( * ) ( Wsrep_t*, TxHandle_t* );
using FnPostCommit = Status_e ( * ) ( Wsrep_t*, TxHandle_t* );
using FnApplierPreCommit = Status_e ( * ) ( Wsrep_t*, void* );
using FnApplierInterimCommit = Status_e ( * ) ( Wsrep_t*, void* );
using FnApplierPostCommit = Status_e ( * ) ( Wsrep_t*, void* );
using FnPostRollback = Status_e ( * ) ( Wsrep_t*, TxHandle_t* );
using FnReplayTrx = Status_e ( * ) ( Wsrep_t*, TxHandle_t*, void* );
using FnAbortPreCommit = Status_e ( * ) ( Wsrep_t*, int64_t, uint64_t );
using FnAppendKey = Status_e ( * ) ( Wsrep_t*, TxHandle_t*, const Key_t*, uint64_t, KeyType_e, bool );
using FnAppendData = Status_e ( * ) ( Wsrep_t*, TxHandle_t*, const Buf_t*, uint64_t, DataType_e, bool );
using FnCasualRead = Status_e ( * ) ( Wsrep_t*, Wsrep::GlobalTid_t* );
using FnFreeConnection = Status_e ( * ) ( Wsrep_t*, uint64_t );

// total order
using FnToExecuteStart = Status_e ( * ) ( Wsrep_t*, uint64_t, const Key_t*, uint64_t, const Buf_t*, uint64_t, Wsrep::TrxMeta_t* );
using FnToExecuteEnd = Status_e ( * ) ( Wsrep_t*, uint64_t );

// preordered
using FnPreorderedCollect = Status_e ( * ) ( Wsrep_t*, PoHandle_t*, const Buf_t*, uint64_t, bool );
using FnPreorderedCommit = Status_e ( * ) ( Wsrep_t*, PoHandle_t*, const Wsrep::UUID_t*, DWORD, int, bool );

// sst
using FnSstSent = Status_e ( * ) ( Wsrep_t*, const Wsrep::GlobalTid_t*, int );
using FnSstReceived = Status_e ( * ) ( Wsrep_t*, const Wsrep::GlobalTid_t*, const void*, uint64_t, int );
using FnSnapshot = Status_e ( * ) ( Wsrep_t*, const void*, uint64_t, const char* );

// stat var
using FnStatsGet = Wsrep::StatsVars_t* (*)( Wsrep_t* );
using FnStatsFree = void ( * ) ( Wsrep_t*, const Wsrep::StatsVars_t* );
using FnStatsReset = void ( * ) ( Wsrep_t* );

// misc
using FnFetchPfsInfo = void ( * ) ( Wsrep_t*, NodeInfo_t*, DWORD );
using FnPause = int64_t ( * ) ( Wsrep_t* ); // returns SeqNo
using FnResume = Status_e ( * ) ( Wsrep_t* );
using FnDesync = Status_e ( * ) ( Wsrep_t* );
using FnResync = Status_e ( * ) ( Wsrep_t* );

// lock/unlock
using FnLock = Status_e ( * ) ( Wsrep_t*, const char*, bool, uint64_t, int64_t );
using FnUnlock = Status_e ( * ) ( Wsrep_t*, const char*, uint64_t );
using FnIsLocked = bool ( * ) ( Wsrep_t*, const char*, uint64_t*, Wsrep::UUID_t* );
// The provider vtable as loaded from the wsrep shared library.
// NOTE(review): the field order and types appear to mirror the wsrep v31
// provider struct binary layout — do NOT reorder, insert or remove members;
// verify against wsrep_api.h when touching this.
struct Wsrep_t
{
	const char* m_szInterfaceVersion; // must equal INTERFACE_VERSION ("31")
	FnInit m_fnInit;
	FnCapabilities m_fnCapabilities;
	FnOptionsSet m_fnOptionsSet;
	FnOptionsGet m_fnOptionsGet;
	FnConnect m_fnConnect;
	FnDisconnect m_fnDisconnect;
	FnRecv m_fnRecv;
	FnReplicate m_fnReplicate;
	FnPreCommit m_fnPreCommit;
	FnReplicatePreCommit m_fnReplicatePreCommit;
	FnInterimCommit m_fnInterimCommit;
	FnPostCommit m_fnPostCommit;
	FnApplierPreCommit m_fnApplierPreCommit;
	FnApplierInterimCommit m_fnApplierInterimCommit;
	FnApplierPostCommit m_fnApplierPostCommit;
	FnPostRollback m_fnPostRollback;
	FnReplayTrx m_fnReplayTrx;
	FnAbortPreCommit m_fnAbortPreCommit;
	FnAppendKey m_fnAppendKey;
	FnAppendData m_fnAppendData;
	FnCasualRead m_fnCasualRead;
	FnFreeConnection m_fnFreeConnection;
	FnToExecuteStart m_fnToExecuteStart;
	FnToExecuteEnd m_fnToExecuteEnd;
	FnPreorderedCollect m_fnPreorderedCollect;
	FnPreorderedCommit m_fnPreorderedCommit;
	FnSstSent m_fnSstSent;
	FnSstReceived m_fnSstReceived;
	FnSnapshot m_fnSnapshot;
	FnStatsGet m_fnStatsGet;
	FnStatsFree m_fnStatsFree;
	FnStatsReset m_fnStatsReset;
	FnFetchPfsInfo m_fnFetchPfsInfo;
	FnPause m_fnPause;
	FnResume m_fnResume;
	FnDesync m_fnDesync;
	FnResync m_fnResync;
	FnLock m_fnLock;
	FnUnlock m_fnUnlock;
	FnIsLocked m_fnIsLocked;
	const char* m_szName;
	const char* m_szVersion;
	const char* m_szVendor;
	FnFree m_fnFree;
	void *m_pDlh, *m_pCtx; // dlopen handle and provider-private context
};
// boring wrapper of everything in Raw::Wsrep_c, refcounted
struct WrappedWsrep_t final : public ISphRefcountedMT
{
Wsrep_t m_tWsrep;
std::unique_ptr<CSphDynamicLibrary> m_pWsrepLib;
Status_e Init ( const InitArgs_t* pArgs )
{
if ( !m_tWsrep.m_fnInit )
return Status_e::NOT_IMPL;
return m_tWsrep.m_fnInit ( &m_tWsrep, pArgs );
}
uint64_t Capabilities()
{
assert ( m_tWsrep.m_fnCapabilities );
return m_tWsrep.m_fnCapabilities ( &m_tWsrep );
}
Status_e OptionsSet ( const char* szConf )
{
assert ( m_tWsrep.m_fnOptionsSet );
return m_tWsrep.m_fnOptionsSet ( &m_tWsrep, szConf );
}
char* OptionsGet()
{
assert ( m_tWsrep.m_fnOptionsGet );
return m_tWsrep.m_fnOptionsGet ( &m_tWsrep );
}
Status_e Connect ( const char* szCluster, const char* szClusterUrl, const char* szStateDonor, bool bBootstrap )
{
assert ( m_tWsrep.m_fnConnect );
return m_tWsrep.m_fnConnect ( &m_tWsrep, szCluster, szClusterUrl, szStateDonor, bBootstrap );
}
Status_e Disconnect()
{
assert ( m_tWsrep.m_fnDisconnect );
return m_tWsrep.m_fnDisconnect ( &m_tWsrep );
}
Status_e Recv ( void* pRecvCtx )
{
assert ( m_tWsrep.m_fnRecv );
return m_tWsrep.m_fnRecv ( &m_tWsrep, pRecvCtx );
}
Status_e Replicate ( uint64_t uConnID, TxHandle_t* pHandle, DWORD uFlags, Wsrep::TrxMeta_t* pMeta )
{
assert ( m_tWsrep.m_fnReplicate );
return m_tWsrep.m_fnReplicate ( &m_tWsrep, uConnID, pHandle, uFlags, pMeta );
}
Status_e PreCommit ( uint64_t uConnID, TxHandle_t* pHandle, DWORD uFlags, Wsrep::TrxMeta_t* pMeta )
{
assert ( m_tWsrep.m_fnPreCommit );
return m_tWsrep.m_fnPreCommit ( &m_tWsrep, uConnID, pHandle, uFlags, pMeta );
}
Status_e ReplicatePreCommit ( uint64_t uConnID, TxHandle_t* pHandle, DWORD uFlags, Wsrep::TrxMeta_t* pMeta )
{
assert ( m_tWsrep.m_fnReplicatePreCommit );
return m_tWsrep.m_fnReplicatePreCommit ( &m_tWsrep, uConnID, pHandle, uFlags, pMeta );
}
Status_e InterimCommit ( TxHandle_t* pHandle )
{
assert ( m_tWsrep.m_fnInterimCommit );
return m_tWsrep.m_fnInterimCommit ( &m_tWsrep, pHandle );
}
Status_e PostCommit ( TxHandle_t* pHandle )
{
assert ( m_tWsrep.m_fnPostCommit );
return m_tWsrep.m_fnPostCommit ( &m_tWsrep, pHandle );
}
Status_e ApplierPreCommit ( void* pTrxHandle )
{
assert ( m_tWsrep.m_fnApplierPreCommit );
return m_tWsrep.m_fnApplierPreCommit ( &m_tWsrep, pTrxHandle );
}
Status_e ApplierInterimCommit ( void* pTrxHandle )
{
assert ( m_tWsrep.m_fnApplierInterimCommit );
return m_tWsrep.m_fnApplierInterimCommit ( &m_tWsrep, pTrxHandle );
}
Status_e ApplierPostCommit ( void* pTrxHandle )
{
assert ( m_tWsrep.m_fnApplierPostCommit );
return m_tWsrep.m_fnApplierPostCommit ( &m_tWsrep, pTrxHandle );
}
Status_e PostRollback ( TxHandle_t* pHandle )
{
assert ( m_tWsrep.m_fnPostRollback );
return m_tWsrep.m_fnPostRollback ( &m_tWsrep, pHandle );
}
Status_e ReplayTrx ( TxHandle_t* pHandle, void* pTrxCtx )
{
assert ( m_tWsrep.m_fnReplayTrx );
return m_tWsrep.m_fnReplayTrx ( &m_tWsrep, pHandle, pTrxCtx );
}
Status_e AbortPreCommit ( int64_t iSeqNo, uint64_t tVictimTrx )
{
assert ( m_tWsrep.m_fnAbortPreCommit );
return m_tWsrep.m_fnAbortPreCommit ( &m_tWsrep, iSeqNo, tVictimTrx );
}
Status_e AppendKey ( TxHandle_t* pHandle, const Key_t* pKeys, uint64_t iCount, KeyType_e eKeyType, bool bCopy )
{
assert ( m_tWsrep.m_fnAppendKey );
return m_tWsrep.m_fnAppendKey ( &m_tWsrep, pHandle, pKeys, iCount, eKeyType, bCopy );
}
Status_e AppendData ( TxHandle_t* pHandle, const Buf_t* pData, uint64_t iCount, DataType_e eDatatype, bool bCopy )
{
assert ( m_tWsrep.m_fnAppendData );
return m_tWsrep.m_fnAppendData ( &m_tWsrep, pHandle, pData, iCount, eDatatype, bCopy );
}
Status_e CausalRead ( Wsrep::GlobalTid_t* pGtid )
{
assert ( m_tWsrep.m_fnCasualRead );
return m_tWsrep.m_fnCasualRead ( &m_tWsrep, pGtid );
}
Status_e FreeConnection ( uint64_t uConnID )
{
assert ( m_tWsrep.m_fnFreeConnection );
return m_tWsrep.m_fnFreeConnection ( &m_tWsrep, uConnID );
}
Status_e ToExecuteStart ( uint64_t uConnID, const Key_t* pKeys, uint64_t NKeys, const Buf_t* pAction, uint64_t uCount, Wsrep::TrxMeta_t* pMeta )
{
assert ( m_tWsrep.m_fnToExecuteStart );
return m_tWsrep.m_fnToExecuteStart ( &m_tWsrep, uConnID, pKeys, NKeys, pAction, uCount, pMeta );
}
Status_e ToExecuteEnd ( uint64_t uConnID )
{
assert ( m_tWsrep.m_fnToExecuteEnd );
return m_tWsrep.m_fnToExecuteEnd ( &m_tWsrep, uConnID );
}
Status_e PreorderedCollect ( PoHandle_t* pHandle, const Buf_t* pData, uint64_t uCount, bool bCopy )
{
assert ( m_tWsrep.m_fnPreorderedCollect );
return m_tWsrep.m_fnPreorderedCollect ( &m_tWsrep, pHandle, pData, uCount, bCopy );
}
Status_e PreorderedCommit ( PoHandle_t* pHandle, const Wsrep::UUID_t* pSourceId, DWORD uFlags, int iRange, bool bCommit )
{
assert ( m_tWsrep.m_fnPreorderedCommit );
return m_tWsrep.m_fnPreorderedCommit ( &m_tWsrep, pHandle, pSourceId, uFlags, iRange, bCommit );
}
Status_e SstSent ( const Wsrep::GlobalTid_t* state_id, int iCode )
{
assert ( m_tWsrep.m_fnSstSent );
return m_tWsrep.m_fnSstSent ( &m_tWsrep, state_id, iCode );
}
Status_e SstReceived ( const Wsrep::GlobalTid_t* pStateID, const void* pState, uint64_t uLen, int iCode )
{
assert ( m_tWsrep.m_fnSstReceived );
return m_tWsrep.m_fnSstReceived ( &m_tWsrep, pStateID, pState, uLen, iCode );
}
Status_e Snapshot ( const void* pMsg, uint64_t uLen, const char* szDonorSpec )
{
assert ( m_tWsrep.m_fnSnapshot );
return m_tWsrep.m_fnSnapshot ( &m_tWsrep, pMsg, uLen, szDonorSpec );
}
Wsrep::StatsVars_t* StatsGet()
{
assert ( m_tWsrep.m_fnStatsGet );
return m_tWsrep.m_fnStatsGet ( &m_tWsrep );
}
// releases the vars array previously returned by StatsGet()
void StatsFree ( Wsrep::StatsVars_t* pVars )
{
assert ( m_tWsrep.m_fnStatsFree );
m_tWsrep.m_fnStatsFree ( &m_tWsrep, pVars );
}
// resets the provider's statistics counters
void StatsReset()
{
assert ( m_tWsrep.m_fnStatsReset );
m_tWsrep.m_fnStatsReset ( &m_tWsrep );
}
// fills pNodes (uSize entries) with cluster member info from the provider
void FetchPfsInfo ( NodeInfo_t* pNodes, DWORD uSize )
{
assert ( m_tWsrep.m_fnFetchPfsInfo );
m_tWsrep.m_fnFetchPfsInfo ( &m_tWsrep, pNodes, uSize );
}
// pauses replication; return value comes straight from the provider (presumably the pause seqno — confirm)
int64_t Pause()
{
assert ( m_tWsrep.m_fnPause );
return m_tWsrep.m_fnPause ( &m_tWsrep );
}
// resumes replication previously paused via Pause()
Status_e Resume()
{
assert ( m_tWsrep.m_fnResume );
return m_tWsrep.m_fnResume ( &m_tWsrep );
}
// requests the node to desync from the cluster flow control
Status_e Desync()
{
assert ( m_tWsrep.m_fnDesync );
return m_tWsrep.m_fnDesync ( &m_tWsrep );
}
// requests the node to resync after a Desync()
Status_e Resync()
{
assert ( m_tWsrep.m_fnResync );
return m_tWsrep.m_fnResync ( &m_tWsrep );
}
// acquires a cluster-wide named lock (shared or exclusive) on behalf of uOwner
Status_e Lock ( const char* szName, bool bShared, uint64_t uOwner, int64_t uOut )
{
assert ( m_tWsrep.m_fnLock );
return m_tWsrep.m_fnLock ( &m_tWsrep, szName, bShared, uOwner, uOut );
}
// releases a cluster-wide named lock held by uOwner
Status_e Unlock ( const char* szName, uint64_t uOwner )
{
assert ( m_tWsrep.m_fnUnlock );
return m_tWsrep.m_fnUnlock ( &m_tWsrep, szName, uOwner );
}
// checks whether the named cluster lock is held; on success fills holder connection and node ids
bool IsLocked ( const char* szName, uint64_t* pConn, Wsrep::UUID_t* pNode )
{
assert ( m_tWsrep.m_fnIsLocked );
return m_tWsrep.m_fnIsLocked ( &m_tWsrep, szName, pConn, pNode );
}
public:
using Status_e_ = Status_e;
// static shim exposing the free GetStatus() helper (status code -> human-readable string)
inline static const char* szGetStatus ( Status_e eStatus ) noexcept
{
return GetStatus ( eStatus );
}
public:
// nothing should be used if LoadWsrep() returned false.
// Validates the dynamically loaded provider: the interface version string must match ours and
// every function pointer of the vtable must be populated. On failure the first missing symbol
// is reported; the scope-exit guard below flushes the accumulated TlsMsg at the chosen severity.
bool CheckLoadWsrep()
{
TlsMsg::ResetErr();
auto eLogLvl = LogLevel_e::ERROR_;
// flush whatever message accumulated in TlsMsg when we leave, at error or (on success) info level
AT_SCOPE_EXIT ( [&eLogLvl] { if ( TlsMsg::HasErr() ) WsrepLog ( eLogLvl, TlsMsg::szError()); TlsMsg::ResetErr(); } );
m_tWsrep.m_pDlh = m_pWsrepLib->GetLib();
if ( !!strcmp ( INTERFACE_VERSION, m_tWsrep.m_szInterfaceVersion ) )
return TlsMsg::Err ( "wrong galera interface version. Need %s, got %s", INTERFACE_VERSION, m_tWsrep.m_szInterfaceVersion );
// each clause short-circuits: a null pointer makes TlsMsg::Err() record the name of the first
// missing symbol (Err() yields false), stopping the chain
if ( ( m_tWsrep.m_fnInit || TlsMsg::Err ( "wrong Init" ) )
&& ( m_tWsrep.m_fnCapabilities || TlsMsg::Err ( "wrong Capabilities" ) )
&& ( m_tWsrep.m_fnOptionsSet || TlsMsg::Err ( "wrong OptionsSet" ) )
&& ( m_tWsrep.m_fnOptionsGet || TlsMsg::Err ( "wrong OptionsGet" ) )
&& ( m_tWsrep.m_fnConnect || TlsMsg::Err ( "wrong Connect" ) )
&& ( m_tWsrep.m_fnDisconnect || TlsMsg::Err ( "wrong Disconnect" ) )
&& ( m_tWsrep.m_fnRecv || TlsMsg::Err ( "wrong Recv" ) )
&& ( m_tWsrep.m_fnReplicate || TlsMsg::Err ( "wrong Replicate" ) )
&& ( m_tWsrep.m_fnPreCommit || TlsMsg::Err ( "wrong PreCommit" ) )
&& ( m_tWsrep.m_fnReplicatePreCommit || TlsMsg::Err ( "wrong ReplicatePreCommit" ) )
&& ( m_tWsrep.m_fnInterimCommit || TlsMsg::Err ( "wrong InterimCommit" ) )
&& ( m_tWsrep.m_fnPostCommit || TlsMsg::Err ( "wrong PostCommit" ) )
&& ( m_tWsrep.m_fnApplierPreCommit || TlsMsg::Err ( "wrong ApplierPreCommit" ) )
&& ( m_tWsrep.m_fnApplierInterimCommit || TlsMsg::Err ( "wrong ApplierInterimCommit" ) )
&& ( m_tWsrep.m_fnApplierPostCommit || TlsMsg::Err ( "wrong ApplierPostCommit" ) )
&& ( m_tWsrep.m_fnPostRollback || TlsMsg::Err ( "wrong PostRollback" ) )
&& ( m_tWsrep.m_fnReplayTrx || TlsMsg::Err ( "wrong ReplayTrx" ) )
&& ( m_tWsrep.m_fnAbortPreCommit || TlsMsg::Err ( "wrong AbortPreCommit" ) )
&& ( m_tWsrep.m_fnAppendKey || TlsMsg::Err ( "wrong AppendKey" ) )
&& ( m_tWsrep.m_fnAppendData || TlsMsg::Err ( "wrong AppendData" ) )
&& ( m_tWsrep.m_fnCasualRead || TlsMsg::Err ( "wrong CasualRead" ) )
&& ( m_tWsrep.m_fnFreeConnection || TlsMsg::Err ( "wrong FreeConnection" ) )
&& ( m_tWsrep.m_fnToExecuteStart || TlsMsg::Err ( "wrong ToExecuteStart" ) )
&& ( m_tWsrep.m_fnToExecuteEnd || TlsMsg::Err ( "wrong ToExecuteEnd" ) )
&& ( m_tWsrep.m_fnPreorderedCollect || TlsMsg::Err ( "wrong PreorderedCollect" ) )
&& ( m_tWsrep.m_fnPreorderedCommit || TlsMsg::Err ( "wrong PreorderedCommit" ) )
&& ( m_tWsrep.m_fnSstSent || TlsMsg::Err ( "wrong SstSent" ) )
&& ( m_tWsrep.m_fnSstReceived || TlsMsg::Err ( "wrong SstReceived" ) )
&& ( m_tWsrep.m_fnSnapshot || TlsMsg::Err ( "wrong Snapshot" ) )
&& ( m_tWsrep.m_fnStatsGet || TlsMsg::Err ( "wrong StatsGet" ) )
&& ( m_tWsrep.m_fnStatsFree || TlsMsg::Err ( "wrong StatsFree" ) )
&& ( m_tWsrep.m_fnStatsReset || TlsMsg::Err ( "wrong StatsReset" ) )
&& ( m_tWsrep.m_fnFetchPfsInfo || TlsMsg::Err ( "wrong FetchPfsInfo" ) )
&& ( m_tWsrep.m_fnPause || TlsMsg::Err ( "wrong Pause" ) )
&& ( m_tWsrep.m_fnResume || TlsMsg::Err ( "wrong Resume" ) )
&& ( m_tWsrep.m_fnDesync || TlsMsg::Err ( "wrong Desync" ) )
&& ( m_tWsrep.m_fnResync || TlsMsg::Err ( "wrong Resync" ) )
&& ( m_tWsrep.m_fnLock || TlsMsg::Err ( "wrong Lock" ) )
&& ( m_tWsrep.m_fnUnlock || TlsMsg::Err ( "wrong Unlock" ) )
&& ( m_tWsrep.m_fnIsLocked || TlsMsg::Err ( "wrong IsLocked" ) ) )
{
// success: demote the scope-exit log to INFO and emit the load banner instead of an error
eLogLvl = LogLevel_e::INFO;
TlsMsg::Err() << m_tWsrep.m_szName << " " << m_tWsrep.m_szVersion << " by " << m_tWsrep.m_szVendor << " loaded ok.";
return true;
}
return false;
}
protected:
// releases the provider instance (if it exposed a free hook); nulling the pointer guards double-free
~WrappedWsrep_t() final
{
if ( !m_tWsrep.m_fnFree )
return;
m_tWsrep.m_fnFree ( &m_tWsrep );
m_tWsrep.m_fnFree = nullptr;
}
};
// kinds of objects Galera's pfs instrumentation callback may ask us to service
enum class PfsType_e { UNKNOWN, MUTEX, CONDVAR, THREAD, FILE };
// operations on those objects
enum class PfsOps_e { UNKNOWN, INIT, DESTROY, LOCK, UNLOCK, WAIT, TIMEDWAIT, SIGNAL, BROADCAST, CREATE, OPEN, CLOSE, DELETE_ }; // named this way because of macro conflict with winnt.h
// opaque tag forwarded by the provider; concrete values are defined on Galera's side
enum class PfsTag_e : int;
// v31 provider wrapper: connects the daemon-side Cluster_i with the loaded Galera library and
// implements the Applier_i bracket calls used while applying replicated transactions.
class Provider_c final : public Provider_T<WrappedWsrep_t>, public Wsrep::Applier_i
{
	using BASE = Provider_T<WrappedWsrep_t>;

	// callback for Galera: invoked by the provider on unrecoverable failure
	static void ReplicationAbort()
	{
		sphWarning ( "abort from replication provider" );
	}

	// callback for Galera pfs_instr_cb; services mutex/condvar instrumentation with raw pthread
	// primitives. THREAD and FILE instrumentation is not implemented; on Windows the whole
	// callback is a no-op. eTag is an opaque provider-side tag and is intentionally unused here.
	static void Instr_fn ( PfsType_e eType, PfsOps_e eOps, PfsTag_e eTag, void** ppValue, void** ppAliedValue, const void* pTs )
	{
		switch ( eType )
		{
		case PfsType_e::THREAD:
		case PfsType_e::FILE:
		default:
			return;

#if !_WIN32
		case PfsType_e::MUTEX:
			switch ( eOps )
			{
			case PfsOps_e::INIT:
			{
				auto* pMutex = new pthread_mutex_t;
				pthread_mutex_init ( pMutex, nullptr );
				*ppValue = pMutex;
			}
			break;
			case PfsOps_e::DESTROY:
			{
				auto* pMutex = (pthread_mutex_t*)( *ppValue );
				assert ( pMutex );
				pthread_mutex_destroy ( pMutex );
				delete ( pMutex );
				*ppValue = nullptr;
			}
			break;
			case PfsOps_e::LOCK:
			{
				auto* pMutex = (pthread_mutex_t*)( *ppValue );
				assert ( pMutex );
				pthread_mutex_lock ( pMutex );
			}
			break;
			case PfsOps_e::UNLOCK:
			{
				auto* pMutex = (pthread_mutex_t*)( *ppValue );
				assert ( pMutex );
				pthread_mutex_unlock ( pMutex );
			}
			break;
			default:
				assert ( 0 );
				break;
			}
			// FIXED: the MUTEX case used to fall through into the CONDVAR handler; after a mutex
			// INIT that also allocated a condvar and overwrote *ppValue, leaking the new mutex
			break;

		case PfsType_e::CONDVAR:
			switch ( eOps )
			{
			case PfsOps_e::INIT:
			{
				auto* pCond = new pthread_cond_t;
				pthread_cond_init ( pCond, nullptr );
				*ppValue = pCond;
			}
			break;
			case PfsOps_e::DESTROY:
			{
				auto* pCond = (pthread_cond_t*)( *ppValue );
				assert ( pCond );
				pthread_cond_destroy ( pCond );
				delete ( pCond );
				*ppValue = nullptr;
			}
			break;
			case PfsOps_e::WAIT:
			{
				auto* pCond = (pthread_cond_t*)( *ppValue );
				auto* pMutex = (pthread_mutex_t*)( *ppAliedValue );
				assert ( pCond && pMutex );
				pthread_cond_wait ( pCond, pMutex );
			}
			break;
			case PfsOps_e::TIMEDWAIT:
			{
				auto* pCond = (pthread_cond_t*)( *ppValue );
				auto* pMutex = (pthread_mutex_t*)( *ppAliedValue );
				const auto* wtime = (const timespec*)pTs; // pTs carries the absolute deadline
				assert ( pCond && pMutex );
				pthread_cond_timedwait ( pCond, pMutex, wtime );
			}
			break;
			case PfsOps_e::SIGNAL:
			{
				auto* pCond = (pthread_cond_t*)( *ppValue );
				assert ( pCond );
				pthread_cond_signal ( pCond );
			}
			break;
			case PfsOps_e::BROADCAST:
			{
				auto* pCond = (pthread_cond_t*)( *ppValue );
				assert ( pCond );
				pthread_cond_broadcast ( pCond );
			}
			break;
			default:
				assert ( 0 );
				break;
			}
			break;
#endif
		}
	}

public:
	using WSREPWRAP = WrappedWsrep_t;

	Provider_c ( Wsrep::Cluster_i* pCluster, WSREPWRAP* pWsrep )
		: BASE { pCluster, pWsrep }
	{}

	// Connects this node: fills the provider init-args struct (including all receiver callbacks)
	// and calls the provider's init. Returns false (with TlsMsg error set) on failure.
	bool Init ( CSphString sName, const char* szListenAddr, const char* szIncoming, const char* szPath, const char* szOptions )
	{
		if ( !m_pWsrep )
			return false;
		m_sName = std::move ( sName );

		// layout must match the wsrep v31 init-args struct the loaded provider expects
		struct
		{
			void* m_pCtx;
			const char* m_szName;
			const char* m_szAddress;
			const char* m_szIncoming;
			const char* m_szPath;
			const char* m_szOptions;
			int m_iProtoVer;
			const Wsrep::GlobalTid_t* m_pStateID;
			const char* m_sState;
			uint64_t m_uStateLen;
			// callbacks
			void ( *m_fnLoger ) ( LogLevel_e, const char* );
			CbStatus_e ( *m_fnViewChanged ) ( void*, void*, const Wsrep::ViewInfo_t*, const char*, uint64_t, void**, uint64_t* );
			CbStatus_e ( *m_fnApply ) ( void*, const void*, uint64_t, DWORD, const Wsrep::TrxMeta_t* );
			CbStatus_e ( *m_fnCommit ) ( void*, const void*, DWORD, const Wsrep::TrxMeta_t*, bool*, bool );
			CbStatus_e ( *Unordered_fn ) ( void*, const void*, uint64_t );
			CbStatus_e ( *SstDonate_fn ) ( void*, void*, const void*, uint64_t, const Wsrep::GlobalTid_t*, const char*, uint64_t, bool );
			void ( *m_fnSynced ) ( void* );
			void ( *m_fnReplicationAbort )();
			void ( *m_fnInstr ) ( PfsType_e, PfsOps_e, PfsTag_e, void**, void**, const void* );
		} tArgs = {
			m_pCluster, m_sName.cstr(), szListenAddr, szIncoming, szPath, szOptions, 127, &m_tStateID, "", 0
			, WsrepLog
			, ViewChanged_fn // app + recv
			, Apply_fn // recv
			, &BASE::Commit_fn // recv
			, Unordered_fn // recv
			, SstDonate_fn // app + recv
			, Synced_fn // app
			, ReplicationAbort
			, Instr_fn
		};

		auto eRes = m_pWsrep->Init ( &tArgs );
		return eRes == Status_e::OK || TlsMsg::Err ( "replication init failed: %d '%s'", (int)eRes, GetStatus ( eRes ) );
	}

	// Applier_i: brackets around locally applying a replicated transaction
	void ApplierPreCommit ( const void* pTrx_handle ) final
	{
		m_pWsrep->ApplierPreCommit ( const_cast<void*> ( pTrx_handle ) );
	}

	void ApplierInterimPostCommit ( const void* pTrx_handle ) final
	{
		m_pWsrep->ApplierInterimCommit ( const_cast<void*> ( pTrx_handle ) );
		m_pWsrep->ApplierPostCommit ( const_cast<void*> ( pTrx_handle ) );
	}

	Wsrep::Applier_i* GetApplier() final
	{
		return this;
	}
};
} // namespace RAW31
// v31 writeset: interim-commit the current handle; a successful interim commit means no
// post-rollback is needed any more
template<>
void Writeset_T<RAW31::WrappedWsrep_t>::InterimCommit()
{
m_eLastRes = m_pWsrep->InterimCommit ( &m_tHnd );
CheckResult ( "InterimCommit" );
m_bNeedPostRollBack = false;
}
// v31 writeset: replicate the collected writeset with COMMIT flags; returns false if the provider refused
template<>
[[nodiscard]] bool Writeset_T<RAW31::WrappedWsrep_t>::Replicate()
{
m_eLastRes = m_pWsrep->Replicate ( m_uConnId, &m_tHnd, DwFlags_t::COMMIT, &m_tMeta );
return CheckResult ( "Replicate", true );
}
// factory for a v31-interface provider instance wrapping the loaded Galera library
Wsrep::Provider_i* MakeProviderV31 ( WsrepLoader_t tLoader, Wsrep::Cluster_i* pCluster, CSphString sName, const char* szListenAddr, const char* szIncoming, const char* szPath, const char* szOptions )
{
return MakeProvider<RAW31::Provider_c> ( std::move ( tLoader ), pCluster, std::move ( sName ), szListenAddr, szIncoming, szPath, szOptions );
}
| 23,332
|
C++
|
.cpp
| 622
| 34.389068
| 199
| 0.678799
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
17,009
|
wsrep_cxx.cpp
|
manticoresoftware_manticoresearch/src/replication/wsrep_cxx.cpp
|
//
// Copyright (c) 2019-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org
//
#include "wsrep_cxx_int.h"
// Galera messages which are expected during normal startup and must not be surfaced as warnings
static const char* g_dReplicatorPatterns[] = {
	"Could not open state file for reading:",
	"No persistent state found. Bootstrapping with default state",
	"Fail to access the file (" };

// returns true when szMsg starts with one of the known benign Galera messages above
bool CheckNoWarning ( const char* szMsg )
{
	if ( szMsg && szMsg[0] )
	{
		for ( const char* szPrefix : g_dReplicatorPatterns )
			if ( !strncmp ( szMsg, szPrefix, strlen ( szPrefix ) ) )
				return true;
	}
	return false;
}
// callback for Galera logger_cb to log messages and errors
// callback for Galera logger_cb: maps provider log levels onto daemon log levels
void WsrepLog ( LogLevel_e eLevel, const char* szMsg )
{
// in normal flow need to skip certain messages from Galera but keep message in debug replication verbosity level
// don't want to patch Galera source code
if ( g_eLogLevel < SPH_LOG_RPL_DEBUG && eLevel == LogLevel_e::WARN && CheckNoWarning ( szMsg ) )
return;
ESphLogLevel eLevelDst = SPH_LOG_INFO;
switch ( eLevel )
{
case LogLevel_e::FATAL:
case LogLevel_e::ERROR_:
eLevelDst = SPH_LOG_FATAL;
break;
case LogLevel_e::WARN:
eLevelDst = SPH_LOG_WARNING;
break;
case LogLevel_e::INFO:
case LogLevel_e::DEBUG:
default:
// info/debug from the provider is only interesting at replication-debug verbosity
eLevelDst = SPH_LOG_RPL_DEBUG;
}
sphLogf ( eLevelDst, "%s", szMsg );
}
// human-readable name of a cluster view status
const char * Wsrep::GetViewStatus ( Wsrep::ViewStatus_e eStatus ) noexcept
{
	switch ( eStatus )
	{
	case Wsrep::ViewStatus_e::PRIMARY:		return "primary";
	case Wsrep::ViewStatus_e::NON_PRIMARY:	return "non-primary";
	case Wsrep::ViewStatus_e::DISCONNECTED:	return "disconnected";
	default:								return "unknown (MAX)";
	}
}
// log debug info about nodes as current nodes views that
// logs the cluster membership view at replication-debug verbosity; no-op at lower verbosity
void LogGroupView ( const Wsrep::ViewInfo_t* pView )
{
if ( g_eLogLevel < SPH_LOG_RPL_DEBUG )
return;
using namespace Wsrep;
sphLogDebugRpl ( "new cluster membership: %d(%d), global seqno: " INT64_FMT ", status %s, gap: %s",
pView->m_iIdx, pView->m_iNMembers, pView->m_tStateId.m_iSeqNo, GetViewStatus ( pView->m_eStatus ), pView->m_bGap ? "true" : "false" );
StringBuilder_c sBuf;
// m_tMemInfo is treated as the first element of an inline array of m_iNMembers entries
// (flexible-array-style layout coming from the provider) -- confirm against ViewInfo_t
const MemberInfo_t* pBoxes = &pView->m_tMemInfo;
for ( int i = 0; i < pView->m_iNMembers; ++i )
sBuf.Appendf ( "\n'%s', '%s' %s", pBoxes[i].m_sName, pBoxes[i].m_sIncoming, ( i == pView->m_iIdx ? "*" : "" ) ); // '*' marks this node
sphLogDebugRpl ( "%s", sBuf.IsEmpty() ? "no nodes" : sBuf.cstr() );
}
// renderer for 'show threads'-style output: tags replication service threads with the cluster name
DEFINE_RENDER ( GaleraInfo_t )
{
auto& tInfo = *(GaleraInfo_t*)const_cast<void*> ( pSrc );
dDst.m_sChain << "Repl ";
dDst.m_sClientName << "wsrep " << tInfo.m_sName.cstr();
}
// formats a 16-byte UUID as the canonical "8-4-4-4-12" lowercase hex string
CSphString Wsrep::Uuid2Str ( const Wsrep::UUID_t& tUuid )
{
// h(i) renders byte i as two zero-padded hex digits
auto h = [&tUuid] ( int idx ) { return FixedNum_T<BYTE, 16, 2, 0, '0'> ( tUuid[idx] ); };
CSphString sResult;
StringBuilder_c{}.Sprint (h(0),h(1),h(2),h(3),'-',h(4),h(5),'-',h(6),h(7),'-',h(8),h(9),'-',h(10),h(11),h(12),h(13),h(14),h(15)).MoveTo(sResult);
return sResult;
}
// formats a global transaction id as "<uuid>:<seqno>"
CSphString Wsrep::Gtid2Str ( const Wsrep::GlobalTid_t& tGtid )
{
CSphString sResult;
StringBuilder_c {}.Sprint ( Wsrep::Uuid2Str ( tGtid.m_tUuid ), ':', tGtid.m_iSeqNo ).MoveTo ( sResult );
return sResult;
}
#if __has_include( <charconv>)
// have std::from_chars: a thin pass-through wrapper
template <typename INT>
inline static std::from_chars_result from_chars_wrap ( const char* pBegin, const char* pEnd, INT& iValue, int iBase=10 )
{
	return std::from_chars ( pBegin, pEnd, iValue, iBase );
}
#else
// pre-<charconv> toolchain: minimal stand-ins for std::from_chars[_result]
struct from_chars_result
{
	const char* ptr;
	std::errc ec;
};

// decimal int64 parse via strtoll; ec is set to invalid_argument unless the whole [pBegin,pEnd) is consumed
inline static from_chars_result from_chars_wrap ( const char* pBegin, const char* pEnd, int64_t& iValue )
{
	// FIXED: ec was left uninitialized when the whole range was consumed, so callers comparing
	// ec read an indeterminate value; start from "no error" explicitly
	from_chars_result tRes { pBegin, std::errc() };
	iValue = strtoll ( pBegin, (char**)&tRes.ptr, 10 );
	if ( tRes.ptr < pEnd )
		tRes.ec = std::errc::invalid_argument;
	return tRes;
}

// parses exactly two hex digits into one byte (the only base used by the UUID parser)
inline static from_chars_result from_chars_wrap ( const char* pBegin, const char* pEnd, BYTE& uValue, int iBase )
{
	assert ( iBase == 16 );
	from_chars_result tRes { pBegin, std::errc::invalid_argument };
	auto Char2Hex = [] ( BYTE uChar ) -> int {
		switch ( uChar ) {
		case '0': return 0;
		case '1': return 1;
		case '2': return 2;
		case '3': return 3;
		case '4': return 4;
		case '5': return 5;
		case '6': return 6;
		case '7': return 7;
		case '8': return 8;
		case '9': return 9;
		case 'a':
		case 'A': return 10;
		case 'b':
		case 'B': return 11;
		case 'c':
		case 'C': return 12;
		case 'd':
		case 'D': return 13;
		case 'e':
		case 'E': return 14;
		case 'f':
		case 'F': return 15;
		default: return -1;
		}
	};
	auto iVal = Char2Hex ( *tRes.ptr );
	if ( iVal < 0 )
		return tRes; // first char is not a hex digit
	uValue = iVal << 4;
	++tRes.ptr;
	iVal = Char2Hex ( *tRes.ptr );
	if ( iVal < 0 )
		return tRes; // second char is not a hex digit
	uValue += iVal;
	++tRes.ptr;
	tRes.ec = static_cast<std::errc> ( 0 ); // both digits consumed: success
	return tRes;
}
#endif
// Parses up to 16 hex bytes (optionally separated by single '-') from sUuid into dRes.
// On malformed/short input dRes is zeroed. Returns pointer just past the last consumed char,
// which lets Str2Gtid() continue parsing the ":seqno" suffix.
const char * Str2UuidImpl ( Wsrep::UUID_t& dRes, const CSphString & sUuid )
{
int iUuidBytes = 0;
const char* pSrc = sUuid.cstr();
const char* pDigitEnd = pSrc + 2;
const char* pEnd = pSrc + sUuid.Length();
while ( pDigitEnd <= pEnd && iUuidBytes < 16 )
{
auto tChars = from_chars_wrap ( pSrc, pDigitEnd, dRes[iUuidBytes++], 16 );
if ( tChars.ec == std::errc::invalid_argument )
break;
// skip exactly one '-' separator, except after the last byte
pSrc = tChars.ptr + ( iUuidBytes != 16 && tChars.ptr < pEnd && *tChars.ptr == '-' );
pDigitEnd = pSrc + 2;
}
if ( iUuidBytes < 16 )
dRes = {}; // incomplete parse: zero out instead of returning partial garbage
return pSrc;
}
// parses a UUID string; result is all-zero on malformed input (see Str2UuidImpl)
Wsrep::UUID_t Wsrep::Str2Uuid ( const CSphString & sUuid )
{
Wsrep::UUID_t dRes;
Str2UuidImpl ( dRes, sUuid );
return dRes;
}
// parses "<uuid>:<seqno>" into a GTID; any parse failure yields an all-default GTID
Wsrep::GlobalTid_t Wsrep::Str2Gtid ( const CSphString& sGtid )
{
Wsrep::GlobalTid_t dRes;
const char* pEnd = sGtid.cstr() + sGtid.Length();
const char* pSrc = Str2UuidImpl ( dRes.m_tUuid, sGtid );
// a ':' followed by at least one char must separate uuid and seqno
if ( pSrc+1 < pEnd && *pSrc == ':' )
{
++pSrc;
auto tChars = from_chars_wrap ( pSrc, pEnd, dRes.m_iSeqNo );
if ( tChars.ec == std::errc::invalid_argument )
dRes = {};
} else dRes = {};
return dRes;
}
// monotonically growing id generator for writeset connections
std::atomic<uint64_t> uWritesetConnIds {1};

// Attempts to load the Galera shared library: first the configured path, then the vanilla
// distro location. On success fills the loader entry point and the provider interface version.
WsrepLoader_t TryWsrep()
{
WsrepLoader_t tRes;
static const char* VANILLA_GALERA = "/usr/lib/galera/libgalera_smm.so";
tRes.m_pLibrary = std::make_unique<CSphDynamicLibrary> ( GET_GALERA_FULLPATH().cstr(), false );
if ( !tRes.m_pLibrary->GetLib() )
tRes.m_pLibrary->CSphDynamicLibraryAlternative ( VANILLA_GALERA, false ); // fallback location
if ( !tRes.m_pLibrary->GetLib() )
{
tRes.m_pLibrary.reset();
TlsMsg::Err ( "no wsrep provider available. Tried %s, then %s", GET_GALERA_FULLPATH().cstr(), VANILLA_GALERA );
return tRes;
}
TlsMsg::ResetErr();
auto eLogLvl = LogLevel_e::ERROR_;
AT_SCOPE_EXIT ( [&eLogLvl] { if ( TlsMsg::HasErr() ) WsrepLog ( eLogLvl, TlsMsg::szError()); TlsMsg::ResetErr(); } );
// resolve the loader entry point and the exported interface version string
std::array<const char*, 2> sSyms { "wsrep_loader", "wsrep_interface_version" };
const char** pszIfaceVer = nullptr;
std::array<void**, 2> ppSyms { (void**)&tRes.m_WsrepLoaderFn, (void**)&pszIfaceVer };
if ( !tRes.m_pLibrary->LoadSymbols ( (const char**)sSyms.data(), (void***)ppSyms.data(), 2 ) )
{
TlsMsg::Err ( "can't load wsrep provider" );
return tRes;
}
tRes.m_iIfaceVer = atoi ( *pszIfaceVer );
return tRes;
}
// Loads the Galera library and instantiates a provider matching its interface version
// (25 or 31 are supported); returns nullptr with TlsMsg error otherwise.
Wsrep::Provider_i* Wsrep::MakeProvider ( Wsrep::Cluster_i* pCluster, CSphString sName, const char* szListenAddr, const char* szIncoming, const char* szPath, const char* szOptions )
{
auto tLoader = TryWsrep();
if ( !tLoader.m_pLibrary )
return nullptr;
if ( tLoader.m_iIfaceVer == 25 )
return MakeProviderV25 ( std::move ( tLoader ), pCluster, std::move ( sName ), szListenAddr, szIncoming, szPath, szOptions );
if ( tLoader.m_iIfaceVer == 31 )
return MakeProviderV31 ( std::move ( tLoader ), pCluster, std::move ( sName ), szListenAddr, szIncoming, szPath, szOptions );
TlsMsg::Err ( "Wrong galera interface version. Got %d", tLoader.m_iIfaceVer );
return nullptr;
}
| 7,981
|
C++
|
.cpp
| 237
| 31.565401
| 180
| 0.680456
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
17,010
|
receiver_ctx.cpp
|
manticoresoftware_manticoresearch/src/replication/receiver_ctx.cpp
|
//
// Copyright (c) 2019-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org
//
#include "receiver_ctx.h"
#include "tracer.h"
#include "sphinxpq.h"
#include "accumulator.h"
#include "memio.h"
#include "serialize.h"
#include "sphinxrt.h"
#include "searchdaemon.h"
#include "searchdreplication.h"
// verbose logging of replcating transactions, ruled by this env variable
static bool LOG_LEVEL_RPL_TNX = val_from_env ( "MANTICORE_LOG_RPL_TNX", false );
#define LOG_COMPONENT_RPL_TNX ""
#define RPL_TNX LOGMSG ( RPL_DEBUG, RPL_TNX, RPL_TNX )
// data passed to Galera and used at callbacks
// data passed to Galera and used at callbacks; accumulates parsed commands between the
// apply and commit callbacks of a single replicated transaction
class ReceiverCtx_c final: public Wsrep::Receiver_i
{
// share of remote commands received between apply and commit callbacks
RtAccum_t m_tAcc; // apply fn, commit fn
CSphQuery m_tQuery; // holds the condition of a replicated conditional update
CSphString m_sName; // name of serving cluster
Wsrep::Provider_i* m_pProvider = nullptr;
std::function<void()> m_fnOnClean; // invoked after each per-trx cleanup
private:
void Cleanup();
~ReceiverCtx_c() final;
public:
ReceiverCtx_c ( CSphString sName, Wsrep::Provider_i* pProvider, std::function<void()> fnOnClean );
// non-copyable and non-movable: Galera holds a raw pointer to this context
ReceiverCtx_c ( const ReceiverCtx_c& ) = delete;
ReceiverCtx_c ( ReceiverCtx_c&& ) = delete;
ReceiverCtx_c& operator= ( const ReceiverCtx_c& ) = delete;
ReceiverCtx_c& operator= ( ReceiverCtx_c&& ) = delete;
// implementation of Wsrep::Receiver_i
bool ApplyWriteset ( ByteBlob_t tData, bool bIsolated ) final;
void ApplyUnordered ( ByteBlob_t tData ) final;
bool Commit ( const void* pHndTrx, uint32_t uFlags, const Wsrep::TrxMeta_t* pMeta, bool bCommit ) final;
private:
static bool PQAdd ( ReplicationCommand_t* pCmd, ByteBlob_t tReq );
};
// factory; the returned object destroys itself via its private destructor machinery
Wsrep::Receiver_i* MakeReceiverCtx ( CSphString sName, Wsrep::Provider_i* pProvider, std::function<void()> fnOnClean )
{
return new ReceiverCtx_c ( std::move ( sName ), pProvider, std::move(fnOnClean) );
}
// stores cluster name, provider handle and the cleanup callback
ReceiverCtx_c::ReceiverCtx_c ( CSphString sName, Wsrep::Provider_i * pProvider, std::function<void()> fnOnClean )
: m_sName {std::move ( sName )}
, m_pProvider ( pProvider )
, m_fnOnClean { std::move ( fnOnClean ) }
{}
// final cleanup of any partially accumulated transaction state
ReceiverCtx_c::~ReceiverCtx_c ()
{
Cleanup ();
}
// drops per-transaction state (accumulator and update-condition filters), then notifies the owner
void ReceiverCtx_c::Cleanup ()
{
m_tAcc.Cleanup();
m_tQuery.m_dFilters.Reset();
m_tQuery.m_dFilterTree.Reset();
if ( m_fnOnClean )
m_fnOnClean();
}
// Deserializes a replicated percolate-query-add command and pre-compiles the stored query
// against the target percolate table. Returns false (with a warning logged) on any mismatch.
bool ReceiverCtx_c::PQAdd ( ReplicationCommand_t* pCmd, ByteBlob_t tReq )
{
assert ( pCmd && pCmd->m_eCommand == ReplCmd_e::PQUERY_ADD );
cServedIndexRefPtr_c pServed = GetServed ( pCmd->m_sIndex );
if ( !pServed )
{
sphWarning ( "unknown table '%s' for replication, command %d", pCmd->m_sIndex.cstr(), (int)pCmd->m_eCommand );
return false;
}
if ( pServed->m_eType != IndexType_e::PERCOLATE )
{
sphWarning ( "wrong type of table '%s' for replication, command %d", pCmd->m_sIndex.cstr(), (int)pCmd->m_eCommand );
return false;
}
StoredQueryDesc_t tPQ;
LoadStoredQuery ( tReq, tPQ );
RPL_TNX << "pq-add, table '" << pCmd->m_sIndex.cstr() << "', uid " << tPQ.m_iQUID << " query " << tPQ.m_sQuery.cstr();
CSphString sError;
PercolateQueryArgs_t tArgs ( tPQ );
tArgs.m_bReplace = true; // replicated add always overwrites an existing query with the same uid
pCmd->m_pStored = RIdx_T<PercolateIndex_i*> ( pServed )->CreateQuery ( tArgs, sError );
if ( !pCmd->m_pStored )
{
sphWarning ( "pq-add replication error '%s', table '%s'", sError.cstr(), pCmd->m_sIndex.cstr() );
return false;
}
return true;
}
// callback for Galera to parse replicated commands
// callback for Galera to parse replicated commands; a writeset is a sequence of
// [header][dword payload length][payload] records which are decoded into m_tAcc for Commit()
bool ReceiverCtx_c::ApplyWriteset ( ByteBlob_t tData, bool bIsolated )
{
MemoryReader_c tReader ( tData );
while ( tReader.GetPos() < tData.second )
{
auto pCmd = std::make_unique<ReplicationCommand_t>();
if ( !LoadCmdHeader ( tReader, pCmd.get() ))
{
sphWarning ( "%s", TlsMsg::szError() );
return false;
}
// NOTE(review): the dword is narrowed to int; a length > INT_MAX would go negative and slip
// past the bounds check below -- confirm upstream guarantees on writeset sizes
auto iRequestLen = (int)tReader.GetDword();
if ( iRequestLen + tReader.GetPos() > tData.second )
{
sphWarning ( "replication parse apply - out of buffer read %d+%d of %d", tReader.GetPos(), iRequestLen, tData.second );
return false;
}
const BYTE * pRequest = tData.first + tReader.GetPos();
tReader.SetPos ( tReader.GetPos() + iRequestLen ); // skip payload; decoded below from tReq
pCmd->m_sCluster = m_sName;
pCmd->m_bIsolated = bIsolated;
ByteBlob_t tReq { pRequest, iRequestLen };
switch ( pCmd->m_eCommand )
{
case ReplCmd_e::PQUERY_ADD:
if ( !PQAdd ( pCmd.get(), tReq ) )
return false;
break;
case ReplCmd_e::PQUERY_DELETE:
LoadDeleteQuery ( tReq, pCmd->m_dDeleteQueries, pCmd->m_sDeleteTags );
RPL_TNX << "pq-delete, table '" << pCmd->m_sIndex.cstr() << "', queries " << pCmd->m_dDeleteQueries.GetLength() << ", tags " << pCmd->m_sDeleteTags.scstr();
break;
case ReplCmd_e::TRUNCATE:
RPL_TNX << "pq-truncate, table '" << pCmd->m_sIndex.cstr() << "'";
break;
case ReplCmd_e::CLUSTER_ALTER_ADD:
pCmd->m_bCheckIndex = false; // table may not exist locally yet when joining it to the cluster
RPL_TNX << "pq-cluster-alter-add, table '" << pCmd->m_sIndex.cstr() << "'";
break;
case ReplCmd_e::CLUSTER_ALTER_DROP:
RPL_TNX << "pq-cluster-alter-drop, table '" << pCmd->m_sIndex.cstr() << "'";
break;
case ReplCmd_e::RT_TRX:
m_tAcc.LoadRtTrx ( tReq, pCmd->m_uVersion );
RPL_TNX << "rt trx, table '" << pCmd->m_sIndex.cstr() << "'";
break;
case ReplCmd_e::UPDATE_API:
pCmd->m_pUpdateAPI = new CSphAttrUpdate;
LoadAttrUpdate ( pRequest, iRequestLen, *pCmd->m_pUpdateAPI, pCmd->m_bBlobUpdate );
RPL_TNX << "update, table '" << pCmd->m_sIndex.cstr() << "'";
break;
case ReplCmd_e::UPDATE_QL:
case ReplCmd_e::UPDATE_JSON:
{
// can not handle multiple updates - only one update at time
assert ( !m_tQuery.m_dFilters.GetLength() );
pCmd->m_pUpdateAPI = new CSphAttrUpdate;
// payload is the attr update followed by the serialized WHERE condition
int iGot = LoadAttrUpdate ( pRequest, iRequestLen, *pCmd->m_pUpdateAPI, pCmd->m_bBlobUpdate );
assert ( iGot<iRequestLen );
LoadUpdate ( pRequest + iGot, iRequestLen - iGot, m_tQuery );
pCmd->m_pUpdateCond = &m_tQuery; // points into this context; valid until Cleanup()
RPL_TNX << "update " << ( pCmd->m_eCommand == ReplCmd_e::UPDATE_QL ? "ql" : "json" ) << ", table '" << pCmd->m_sIndex.cstr() << "'";
break;
}
default:
sphWarning ( "unsupported replication command %d", (int) pCmd->m_eCommand );
return false;
}
m_tAcc.m_dCmd.Add ( std::move ( pCmd ) );
}
// the reader must land exactly on the writeset end, otherwise the stream was malformed
if ( tReader.GetPos() == tData.second )
return true;
sphWarning ( "replication parse apply - out of buffer read %d of %d", tReader.GetPos(), tData.second );
return false;
}
// unordered writesets are not used by this daemon; just log their arrival
void ReceiverCtx_c::ApplyUnordered ( ByteBlob_t foo )
{
sphLogDebugRpl ( "unordered byteblob size %d", foo.second );
}
// Commit callback: executes the commands accumulated by ApplyWriteset(). Per-trx state is
// always cleaned up on exit, whether the commit succeeds or not.
bool ReceiverCtx_c::Commit ( const void* pHndTrx, uint32_t uFlags, const Wsrep::TrxMeta_t* pMeta, bool bCommit )
{
AT_SCOPE_EXIT ( [this] { Cleanup(); } );
if ( !bCommit || m_tAcc.m_dCmd.IsEmpty() )
return true; // rollback or empty trx: nothing to execute
bool bOk = true;
bool bIsolated = ( m_tAcc.m_dCmd[0]->m_bIsolated );
// isolated (TOI) commands run without the applier pre/post-commit bracket
if ( bIsolated || !m_pProvider->GetApplier() ) {
bOk = HandleCmdReplicated ( m_tAcc );
} else
{
m_pProvider->GetApplier()->ApplierPreCommit ( pHndTrx );
bOk = HandleCmdReplicated ( m_tAcc );
m_pProvider->GetApplier()->ApplierInterimPostCommit ( pHndTrx );
}
if ( TlsMsg::HasErr ())
sphWarning ( "%s", TlsMsg::szError ());
sphLogDebugRpl ( "seq " INT64_FMT ", committed %d, isolated %d", (int64_t) pMeta->m_tGtid.m_iSeqNo, (int) bOk, (int) bIsolated );
return bOk;
}
| 7,597
|
C++
|
.cpp
| 198
| 35.858586
| 159
| 0.690499
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
17,011
|
cluster_file_send.cpp
|
manticoresoftware_manticoresearch/src/replication/cluster_file_send.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org
//
#include "cluster_file_send.h"
#include "nodes.h"
#include "recv_state.h"
// Non-owning view over a send buffer with a "bytes left" cursor. Consumed() grows as Consume()
// is called; Data() points at the first unconsumed byte.
class SendBuf_c
{
BYTE* m_pData = nullptr;
int m_iSize = 0;
int m_iLeft = 0;
public:
SendBuf_c() = default;
// NOTE(review): this ctor deliberately leaves m_iLeft at 0, so a freshly wrapped buffer reads
// as fully consumed (Consumed()==iSize) -- the receiving side relies on that; a sender must
// call Rewind() first. Confirm before "fixing".
SendBuf_c ( BYTE* pData, int iSize )
: m_pData ( pData )
, m_iSize ( iSize )
{}
[[nodiscard]] inline BYTE* Begin() const noexcept { return m_pData; }
[[nodiscard]] inline int GetLength() const noexcept { return m_iSize; }
// first byte not yet consumed
[[nodiscard]] inline BYTE* Data() const noexcept
{
int iOff = m_iSize - m_iLeft;
return m_pData + iOff;
}
[[nodiscard]] inline int Consumed() const noexcept { return m_iSize - m_iLeft; }
[[nodiscard]] inline int Left() const noexcept { return m_iLeft; }
inline void Consume ( int iLen ) noexcept
{
assert ( iLen <= m_iLeft );
m_iLeft -= iLen;
}
// resets the cursor: whole buffer becomes available again
inline void Rewind() { m_iLeft = m_iSize; }
};
// one network packet of the file-sync protocol: raw chunk bytes plus the operations describing
// how the remote side should place them
struct ClusterFileSendRequest_t
{
uint64_t m_tWriterKey = 0; // identifies the remote writer/session
SendBuf_c m_tSendBuf; // payload bytes
int m_iFile = 0; // index of the file being transferred
CSphVector<FileOp_t> m_dOps; // write/copy operations against that file
};
// wire serialization of a file-send request; field order must match operator>> below
void operator<< ( ISphOutputBuffer& tOut, const ClusterFileSendRequest_t& tReq )
{
tOut.SendUint64 ( tReq.m_tWriterKey );
tOut.SendInt ( tReq.m_tSendBuf.Consumed() ); // only the filled prefix of the buffer travels
tOut.SendInt ( tReq.m_iFile );
tOut.SendBytes ( tReq.m_tSendBuf.Begin(), tReq.m_tSendBuf.Consumed() );
tOut.SendInt ( tReq.m_dOps.GetLength() );
for ( const FileOp_t& tItem : tReq.m_dOps )
{
tOut.SendByte ( (BYTE)tItem.m_eOp );
tOut.SendUint64 ( tItem.m_iSize );
tOut.SendUint64 ( tItem.m_iOffFile );
tOut.SendDword ( tItem.m_iOffBuf );
}
}
// debug dump of a request for logging
StringBuilder_c& operator<< ( StringBuilder_c& tOut, const ClusterFileSendRequest_t& tReq )
{
tOut << "writerkey:" << tReq.m_tWriterKey;
tOut << "consumed:" << tReq.m_tSendBuf.Consumed();
tOut << "file:" << tReq.m_iFile;
tOut << "ops:" << tReq.m_dOps.GetLength();
return tOut;
}
// wire deserialization of a file-send request; mirrors operator<< above. The payload is wrapped
// zero-copy (points into the network buffer) and reads as fully consumed on this side.
void operator>> ( InputBuffer_c& tBuf, ClusterFileSendRequest_t& tReq )
{
tReq.m_tWriterKey = tBuf.GetUint64();
int iSize = tBuf.GetInt();
tReq.m_iFile = tBuf.GetInt();
if ( iSize )
{
const BYTE* pData = nullptr;
tBuf.GetBytesZerocopy ( &pData, iSize );
tReq.m_tSendBuf = SendBuf_c ( (BYTE*)pData, iSize );
}
// ops count was sent via SendInt; GetDword reads the same 4 bytes -- presumably equivalent on this wire
int iCount = tBuf.GetDword();
tReq.m_dOps.Resize ( iCount );
for ( FileOp_t& tItem : tReq.m_dOps )
{
tItem.m_eOp = (FileOp_e)tBuf.GetByte();
tItem.m_iSize = tBuf.GetUint64();
tItem.m_iOffFile = tBuf.GetUint64();
tItem.m_iOffBuf = tBuf.GetDword();
}
}
// reply to a file-send request: which file the remote is working on and how the write went
struct ClusterFileSendReply_t
{
int m_iFile = -1;
WriteResult_e m_eRes { WriteResult_e::WRITE_FAILED };
CSphString m_sWarning;
};
// wire serialization of the reply; field order must match operator>> below
void operator<< ( ISphOutputBuffer& tOut, const ClusterFileSendReply_t& tReq )
{
tOut.SendDword ( tReq.m_iFile ); // int sent as dword; read back the same way
tOut.SendByte ( (BYTE)tReq.m_eRes );
tOut.SendString ( tReq.m_sWarning.cstr() );
}
// debug dump of the reply for logging
StringBuilder_c& operator<< ( StringBuilder_c& tOut, const ClusterFileSendReply_t& tReq )
{
	tOut << "ifile:" << tReq.m_iFile
		<< "eres:" << (BYTE)tReq.m_eRes // FIXED: the "eres" label was missing the ':' every other field has
		<< "warning:" << tReq.m_sWarning;
	return tOut;
}
// wire deserialization of the reply; mirrors operator<< above
void operator>> ( InputBuffer_c& tBuf, ClusterFileSendReply_t& tReq )
{
tReq.m_iFile = tBuf.GetDword();
tReq.m_eRes = (WriteResult_e)tBuf.GetByte();
tReq.m_sWarning = tBuf.GetString();
}
// API command to remote node of file send
using ClusterFileSend_c = ClusterCommand_T<E_CLUSTER::FILE_SEND, ClusterFileSendRequest_t, ClusterFileSendReply_t>;
// Donor-side iterator over the files/chunks the joiner still needs: reads chunks from local
// index files, packs them into send requests and tracks retries.
struct FileReader_t
{
CSphAutofile m_tFile; // currently open source file
ClusterFileSendRequest_t m_tFileSendRequest; // packet being assembled
const AgentDesc_t * m_pAgentDesc = nullptr; // remote node receiving the files
const SyncSrc_t * m_pSyncSrc = nullptr; // what we have locally
const SyncDst_t * m_pSyncDst = nullptr; // what the joiner reported it has
bool m_bDone = false;
bool m_bSuccess = false;
int m_iPackets = 1; // packets sent so far (for logging)
int m_iChunk { 0 }; // next chunk within the current file
int m_iFileRetries { 0 };
public:
bool Next ( StringBuilder_c & sErrors );
bool StartFile ( int iFile, StringBuilder_c& sErrors );
void RetryFile ( int iRemoteFile, bool bNetError, WriteResult_e eRes, StringBuilder_c& tErrors );
private:
bool ReadChunk ( int64_t iFileOff, int iSize, StringBuilder_c& sErrors );
void LogFileSend() const;
bool AddChunks ( StringBuilder_c& sErrors );
[[nodiscard]] int NextFile ( int iCurFile ) const noexcept;
[[nodiscard]] int NextChunk ( int iCurChunk, const FileChunks_t& tChunk ) const noexcept;
};
// scans forward from iCurFile to the first file the joiner still needs (its mask bit is unset);
// returns the total file count when nothing is left
int FileReader_t::NextFile ( int iCurFile ) const noexcept
{
	assert ( m_pSyncSrc );
	assert ( m_pSyncDst );

	const int iTotal = m_pSyncSrc->m_dBaseNames.GetLength();
	while ( iCurFile < iTotal && m_pSyncDst->m_dNodeChunksMask.BitGet ( iCurFile ) )
		++iCurFile;

	return iCurFile;
}
// scans forward from iCurChunk to the first chunk of tChunk the joiner still needs;
// returns the chunk count when the rest of the file is already present remotely
int FileReader_t::NextChunk ( int iCurChunk, const FileChunks_t& tChunk ) const noexcept
{
	assert ( m_pSyncDst );

	const int iTotal = tChunk.GetChunksCount();
	while ( iCurChunk < iTotal && m_pSyncDst->m_dNodeChunksMask.BitGet ( tChunk.m_iHashStartItem + iCurChunk ) )
		++iCurChunk;

	return iCurChunk;
}
// Read iSize bytes of the current file starting at iFileOff into the send
// buffer. The file descriptor is opened lazily on first use and reused across
// chunks of the same file. Returns false (with a message appended to sErrors)
// on open or read failure.
bool FileReader_t::ReadChunk ( int64_t iFileOff, int iSize, StringBuilder_c& sErrors )
{
	if ( !iSize )
		return true;

	assert ( m_pSyncSrc );
	assert ( m_tFileSendRequest.m_iFile >= 0 && m_tFileSendRequest.m_iFile < m_pSyncSrc->m_dBaseNames.GetLength() );

	// lazy open: one descriptor kept open while chunks of this file are streamed
	if ( m_tFile.GetFD() == -1 )
	{
		CSphString sReaderError;
		if ( m_tFile.Open ( m_pSyncSrc->m_dIndexFiles[m_tFileSendRequest.m_iFile], SPH_O_READ, sReaderError, false ) < 0 )
		{
			sErrors += sReaderError.cstr();
			return false;
		}
	}

	SendBuf_c& tBuf = m_tFileSendRequest.m_tSendBuf;
	assert ( iSize <= tBuf.Left() );

	// read the chunk; according to man, pread may return less than the
	// requested size, and that is not an error - keep reading the remainder
	while ( iSize>0 )
	{
		auto iRes = sphPread ( m_tFile.GetFD(), tBuf.Data(), iSize, iFileOff );
		if ( iRes < 0 )
		{
			sErrors.Appendf ( "pread error %d '%s'", errno, strerrorm ( errno ) );
			return false;
		}
		if ( iRes == 0 )
		{
			// EOF before the full chunk was read (file shrank under us);
			// without this check a short file would spin this loop forever
			sErrors.Appendf ( "pread unexpected EOF, %d bytes left", iSize );
			return false;
		}
		tBuf.Consume ( iRes );
		iFileOff += iRes;	// advance the file offset too, or a partial read would re-read the same bytes
		iSize -= iRes;
	}

	return true;
}
// Debug-log the file currently being sent: name, index, destination node,
// packet counter and the per-file timeout (split into sec.msec for printing).
void FileReader_t::LogFileSend () const
{
	sphLogDebugRpl ( "sending file %s (%d) to %s:%d, packets %d, timeout %d.%03d sec", m_pSyncSrc->m_dBaseNames[m_tFileSendRequest.m_iFile].cstr(), m_tFileSendRequest.m_iFile, m_pAgentDesc->m_sAddr.cstr(), m_pAgentDesc->m_iPort, m_iPackets, (int)( m_pSyncDst->m_tmTimeoutFile / 1000 ), (int)( m_pSyncDst->m_tmTimeoutFile % 1000 ) );
}
// Build the operation list for the next packet: chunks the joiner already has
// become COPY_FILE ops (copied locally at the joiner), missing chunks are read
// into the send buffer as COPY_BUFFER ops. Stops early when the send buffer
// is full; appends VERIFY_FILE once the whole file has been covered.
bool FileReader_t::AddChunks ( StringBuilder_c& sErrors )
{
	assert ( m_pSyncSrc );
	assert ( m_pSyncDst );
	assert ( m_tFileSendRequest.m_iFile < m_pSyncSrc->m_dBaseNames.GetLength() );

	const FileChunks_t& tChunks = m_pSyncSrc->m_dChunks[m_tFileSendRequest.m_iFile];
	const int iChunks = tChunks.GetChunksCount();

	auto& dOps = m_tFileSendRequest.m_dOps;
	int iChunk = m_iChunk;
	while ( iChunk < iChunks )
	{
		// iCopyChunk is the first chunk the joiner is missing; everything in
		// [iChunk, iCopyChunk) is already present remotely
		int iCopyChunk = NextChunk ( iChunk, tChunks );
		if ( iCopyChunk != iChunk ) // copy data from joiner local file
		{
			FileOp_t& tOp = dOps.Add();
			tOp.m_eOp = FileOp_e::COPY_FILE;
			tOp.m_iOffFile = tChunks.GetChunkFileOffset ( iChunk );
			if ( iCopyChunk < iChunks )
				tOp.m_iSize = tChunks.GetChunkFileOffset ( iCopyChunk ) - tOp.m_iOffFile; // (int64_t)m_iChunkBytes * iCopyChunk - m_iChunkBytes * iChunk;
			else
				tOp.m_iSize = tChunks.m_iFileSize - tOp.m_iOffFile; // last run extends to the end of the file
		}

		if ( iCopyChunk < iChunks )
		{
			// joiner is missing this chunk - ship its bytes in the packet buffer
			FileOp_t tOp;
			tOp.m_iOffFile = tChunks.GetChunkFileOffset ( iCopyChunk );
			tOp.m_iSize = tChunks.GetChunkFileLength ( iCopyChunk );
			tOp.m_iOffBuf = m_tFileSendRequest.m_tSendBuf.Consumed();
			if ( tOp.m_iSize > m_tFileSendRequest.m_tSendBuf.Left() )
			{
				// buffer full: remember the position and finish this packet
				iChunk = iCopyChunk; // last chunk that was already processed
				break;
			}
			if ( !ReadChunk ( tOp.m_iOffFile, tOp.m_iSize, sErrors ) )
				return false;
			tOp.m_eOp = FileOp_e::COPY_BUFFER;
			dOps.Add ( tOp );
		}
		iChunk = iCopyChunk + 1;
	}

	m_iChunk = iChunk;
	// whole file covered - ask the joiner to verify it
	if ( iChunk >= iChunks )
		dOps.Add().m_eOp = FileOp_e::VERIFY_FILE;

	return true;
}
// Begin sending the first still-needed file at or after iFile: reset the
// operation list, chunk cursor and send buffer, then build the first packet.
bool FileReader_t::StartFile ( int iFile, StringBuilder_c& sErrors )
{
	assert ( m_pSyncSrc );
	assert ( m_pSyncDst );

	m_tFileSendRequest.m_dOps.Resize ( 0 );
	m_tFileSendRequest.m_iFile = NextFile ( iFile );
	m_tFileSendRequest.m_tSendBuf.Rewind();
	m_iChunk = 0;

	LogFileSend();
	return AddChunks ( sErrors );
}
// Advance to the next packet: either more chunks of the current file, or the
// next file the joiner needs. Sets m_bDone/m_bSuccess once every file has
// been sent. Returns false only on a read error while filling the buffer.
bool FileReader_t::Next ( StringBuilder_c & sErrors )
{
	if ( m_bDone )
		return true;

	assert ( m_pSyncSrc );
	assert ( m_pSyncDst );
	const SyncSrc_t* pSrc = m_pSyncSrc;
	const int iFiles = pSrc->m_dBaseNames.GetLength();
	assert ( m_tFileSendRequest.m_iFile < iFiles );

	auto& dOps = m_tFileSendRequest.m_dOps;
	dOps.Resize ( 0 );
	m_tFileSendRequest.m_tSendBuf.Rewind();

	// check for chunks left in last file that was already sent
	if ( m_iChunk >= pSrc->m_dChunks[m_tFileSendRequest.m_iFile].GetChunksCount() )
	{
		// current file fully sent - close it and move to the next needed file
		m_tFile.Close();
		m_iChunk = 0;
		m_tFileSendRequest.m_iFile = NextFile ( m_tFileSendRequest.m_iFile + 1 );
		if ( m_tFileSendRequest.m_iFile == iFiles )
		{
			// no more files: the whole transfer is complete
			m_bDone = true;
			m_bSuccess = true;
			return true;
		}
		// fresh retry budget for the new file
		m_iFileRetries = ReplicationFileRetryCount();
		LogFileSend ( );
	}

	return AddChunks ( sErrors );
}
void ReportSendStat ( const VecTraits_T<AgentConn_t *> & dNodes, const VecTraits_T<FileReader_t> & dReaders, const CSphString & sCluster, const CSphString & sIndex )
{
if ( g_eLogLevel<SPH_LOG_RPL_DEBUG )
return;
int iTotal = 0;
StringBuilder_c tLog;
tLog.Sprintf ( "file sync packets sent '%s:%s' to ", sCluster.cstr(), sIndex.cstr() );
ARRAY_FOREACH ( iAgent, dNodes )
{
const AgentConn_t * pAgent = dNodes[iAgent];
const FileReader_t & tReader = dReaders[iAgent];
tLog.Sprintf ( "'%s:%d':%d,", pAgent->m_tDesc.m_sAddr.cstr(), pAgent->m_tDesc.m_iPort, tReader.m_iPackets );
iTotal += tReader.m_iPackets;
}
tLog.Sprintf ( " total:%d", iTotal );
sphLogDebugRpl ( "%s", tLog.cstr() );
}
// printf-style helper: format the message once, append it to the error
// accumulator AND emit it to the replication debug log.
static void ReportErrorSendFile ( StringBuilder_c& tErrors, const char* sFmt, ... ) __attribute__ ( ( format ( printf, 2, 3 ) ) );
void ReportErrorSendFile ( StringBuilder_c& tErrors, const char* sFmt, ... )
{
	CSphString sError;
	va_list ap;
	va_start ( ap, sFmt );
	sError.SetSprintfVa ( sFmt, ap );
	va_end ( ap );

	tErrors += sError.cstr();
	sphLogDebugRpl ( "%s", sError.cstr() );
}
// Handle a failed packet for the current file. Non-network write failures
// other than VERIFY_FAILED are fatal for this reader; verify failures and
// network errors decrement the retry budget and restart the file from the
// beginning. Sets m_bDone when retries are exhausted or the failure is fatal.
void FileReader_t::RetryFile ( int iRemoteFile, bool bNetError, WriteResult_e eRes, StringBuilder_c& tErrors )
{
	assert ( m_tFileSendRequest.m_iFile >= 0 && m_tFileSendRequest.m_iFile < m_pSyncSrc->m_dBaseNames.GetLength() );

	// validate and report joiner errors
	if ( !bNetError )
	{
		if ( eRes != WriteResult_e::VERIFY_FAILED )
		{
			// remote write error - not recoverable by resending, give up on this reader
			ReportErrorSendFile ( tErrors, "file %s (%d) write error at remote node to %s:%d, retry %d", m_pSyncSrc->m_dBaseNames[m_tFileSendRequest.m_iFile].cstr(), m_tFileSendRequest.m_iFile, m_pAgentDesc->m_sAddr.cstr(), m_pAgentDesc->m_iPort, m_iFileRetries );
			m_bDone = true;
			return;
		}
		// sanity check: the file the joiner reports should match the one we are sending
		if ( m_tFileSendRequest.m_iFile != iRemoteFile )
			ReportErrorSendFile ( tErrors, "retry file %s to %s:%d, file mismatch: reported %d, current %d", m_pSyncSrc->m_dBaseNames[m_tFileSendRequest.m_iFile].cstr(), m_pAgentDesc->m_sAddr.cstr(), m_pAgentDesc->m_iPort, iRemoteFile, m_tFileSendRequest.m_iFile );
	}

	--m_iFileRetries;
	sphLogDebugRpl ( "sending file %s to %s:%d, retry %d", m_pSyncSrc->m_dBaseNames[m_tFileSendRequest.m_iFile].cstr(), m_pAgentDesc->m_sAddr.cstr(), m_pAgentDesc->m_iPort, m_iFileRetries );
	if ( m_iFileRetries <= 0 )
	{
		ReportErrorSendFile ( tErrors, "retry file %s to %s:%d, limit exceeded", m_pSyncSrc->m_dBaseNames[m_tFileSendRequest.m_iFile].cstr(), m_pAgentDesc->m_sAddr.cstr(), m_pAgentDesc->m_iPort );
		m_bDone = true;
		return;
	}

	// restart the whole file from its first missing chunk
	m_bDone = !StartFile ( m_tFileSendRequest.m_iFile, tErrors );
}
// Send index files to multiple joiner nodes in parallel, chunk by chunk, via
// the CLUSTER_FILE_SEND API command. Each node gets its own FileReader_t and
// its own slice of the shared read buffer; after every completed request a
// fresh agent is scheduled with the next packet until every reader is done.
// Returns true only if all readers finished successfully; accumulated errors
// are published via TlsMsg.
bool RemoteClusterFileSend ( const SyncSrc_t & tSigSrc, const CSphVector<RemoteFileState_t> & dDesc, const CSphString & sCluster, const CSphString & sIndex )
{
	StringBuilder_c tErrors ( ";" );

	// setup buffers: one m_iBufferSize slice per destination node
	CSphFixedVector<BYTE> dReadBuf ( tSigSrc.m_iBufferSize * dDesc.GetLength() );
	CSphFixedVector<FileReader_t> dReaders ( dDesc.GetLength() );
	uint64_t tKey = DoubleStringKey ( sCluster, sIndex );
	ARRAY_FOREACH ( iNode, dReaders )
	{
		// setup file readers
		FileReader_t& tReader = dReaders[iNode];
		tReader.m_pAgentDesc = dDesc[iNode].m_pAgentDesc;
		tReader.m_pSyncSrc = dDesc[iNode].m_pSyncSrc;
		tReader.m_pSyncDst = dDesc[iNode].m_pSyncDst;
		tReader.m_tFileSendRequest.m_tSendBuf = SendBuf_c ( dReadBuf.Begin() + tSigSrc.m_iBufferSize * iNode, tSigSrc.m_iBufferSize );
		tReader.m_tFileSendRequest.m_tWriterKey = tKey;
		tReader.m_iFileRetries = ReplicationFileRetryCount();

		if ( !tReader.StartFile ( 0, tErrors ) )
			return TlsMsg::Err ( "%s", tErrors.cstr() );

		assert ( !tReader.m_bDone );
	}

	// create agents
	VecRefPtrs_t<AgentConn_t*> dNodes;
	dNodes.Resize ( dReaders.GetLength() );
	ARRAY_FOREACH ( i, dReaders )
		dNodes[i] = ClusterFileSend_c::CreateAgent ( *dReaders[i].m_pAgentDesc, dReaders[i].m_pSyncDst->m_tmTimeoutFile, dReaders[i].m_tFileSendRequest );

	// submit initial jobs
	CSphRefcountedPtr<RemoteAgentsObserver_i> tReporter ( GetObserver() );
	ClusterFileSend_c tReq;
	ScheduleDistrJobs ( dNodes, &tReq, &tReq, tReporter, ReplicationFileRetryCount(), ReplicationFileRetryDelay() );

	bool bDone = false;
	while ( !bDone )
	{
		// don't forget to check incoming replies after send was over
		bDone = tReporter->IsDone();
		// wait one or more remote queries to complete
		if ( !bDone )
			tReporter->WaitChanges();

		ARRAY_FOREACH ( iAgent, dNodes )
		{
			AgentConn_t* pAgent = dNodes[iAgent];
			if ( !pAgent->m_bSuccess )
				continue;

			FileReader_t& tReader = dReaders[iAgent];
			if ( tReader.m_bDone )
				continue;

			const ClusterFileSendReply_t& tReply = ClusterFileSend_c::GetRes ( *pAgent );
			bool bFileWritten = ( tReply.m_eRes == WriteResult_e::OK );
			bool bNetError = ( !pAgent->m_sFailure.IsEmpty() );

			// report errors first
			if ( bNetError )
				ReportErrorSendFile ( tErrors, "'%s:%d' %s", pAgent->m_tDesc.m_sAddr.cstr(), pAgent->m_tDesc.m_iPort, pAgent->m_sFailure.cstr() );
			pAgent->m_sFailure = "";

			if ( !tReply.m_sWarning.IsEmpty() )
				ReportErrorSendFile ( tErrors, "'%s:%d' %s", pAgent->m_tDesc.m_sAddr.cstr(), pAgent->m_tDesc.m_iPort, tReply.m_sWarning.cstr() );

			if ( !bFileWritten )
			{
				// retry the current file (or mark the reader done on fatal errors)
				tReader.RetryFile ( tReply.m_iFile, bNetError, tReply.m_eRes, tErrors );
			} else if ( !tReader.Next ( tErrors ) )
			{
				// local read failure while preparing the next packet
				pAgent->m_bSuccess = false;
				tReader.m_bDone = true;
			}

			if ( tReader.m_bDone )
				continue;

			// remove agent from main vector, then schedule a fresh agent with the next packet
			pAgent->Release();
			AgentConn_t* pNextJob = ClusterFileSend_c::CreateAgent ( *tReader.m_pAgentDesc, tReader.m_pSyncDst->m_tmTimeoutFile, tReader.m_tFileSendRequest );
			dNodes[iAgent] = pNextJob;

			VectorAgentConn_t dNewNode;
			dNewNode.Add ( pNextJob );
			ScheduleDistrJobs ( dNewNode, &tReq, &tReq, tReporter, ReplicationFileRetryCount(), ReplicationFileRetryDelay() );
			// reset done flag to process new item
			bDone = false;
			++tReader.m_iPackets;
		}
	}

	if ( !tErrors.IsEmpty() )
		TlsMsg::Err ( "%s", tErrors.cstr() );

	ReportSendStat ( dNodes, dReaders, sCluster, sIndex );
	return dReaders.all_of ( [] ( const auto& tReader ) { return tReader.m_bSuccess; } );
}
// Joiner-side handler of CLUSTER_FILE_SEND: apply the donor's operation list
// (local copies + buffered data) to the file identified in the request, then
// reply with the write result. Data size and file offsets are defined by the
// sender.
void ReceiveClusterFileSend ( ISphOutputBuffer & tOut, InputBuffer_c & tBuf )
{
	ClusterFileSendRequest_t tCmd;
	ClusterFileSend_c::ParseRequest ( tBuf, tCmd );

	ClusterFileSendReply_t tRes;
	// the writer state must have been created by an earlier command of the same session
	assert ( RecvState::HasState ( tCmd.m_tWriterKey ) );
	auto& tState = RecvState::GetState ( tCmd.m_tWriterKey );

	VecTraits_T<BYTE> dBuf ( tCmd.m_tSendBuf.Begin(), tCmd.m_tSendBuf.GetLength() );
	tRes.m_eRes = tState.Write ( tCmd.m_iFile, tCmd.m_dOps, dBuf );
	tRes.m_iFile = tCmd.m_iFile;
	if ( tRes.m_eRes != WriteResult_e::OK )
	{
		// errors are returned as a warning string for the donor to log
		tRes.m_sWarning.SetSprintf ( "write finished %s (%d), file %d, operations %d", TlsMsg::szError(), (int)tRes.m_eRes, tCmd.m_iFile, tCmd.m_dOps.GetLength() );
		sphWarning ( "%s", tRes.m_sWarning.cstr() );
	}

	ClusterFileSend_c::BuildReply ( tOut, tRes );
	TlsMsg::ResetErr();
}
| 16,461
|
C++
|
.cpp
| 440
| 34.852273
| 329
| 0.706104
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
17,012
|
cluster_synced.cpp
|
manticoresoftware_manticoresearch/src/replication/cluster_synced.cpp
|
//
// Copyright (c) 2019-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org
//
#include "cluster_synced.h"
#include "cluster_commands.h"
#include "api_command_cluster.h"
#include "common.h"
// Serialize ClusterSyncedRequest_t to the wire. Field order must stay in
// lock-step with the operator>> deserializer; the GTID UUID is sent as a raw
// 16-byte blob.
void operator<< ( ISphOutputBuffer& tOut, const ClusterSyncedRequest_t& tReq )
{
	tOut << (const ClusterRequest_t&)tReq;
	tOut.SendBytes ( tReq.m_tGtid.m_tUuid.data(), 16 );
	tOut.SendUint64 ( tReq.m_tGtid.m_iSeqNo );
	tOut.SendByte ( tReq.m_bSendFilesSuccess );
	tOut.SendString ( tReq.m_sMsg.cstr() );
	tOut << tReq.m_dIndexes;
}
// Human-readable dump of ClusterSyncedRequest_t for logging/debugging.
StringBuilder_c& operator<< ( StringBuilder_c& tOut, const ClusterSyncedRequest_t& tReq )
{
	tOut << (const ClusterRequest_t&)tReq;
	tOut << "gtid:" << Wsrep::Gtid2Str ( tReq.m_tGtid );
	tOut << "filesendsuccess:" << (BYTE)tReq.m_bSendFilesSuccess;
	tOut << "msg:" << tReq.m_sMsg;
	tOut << "indexes:" << tReq.m_dIndexes;
	return tOut;
}
// Deserialize ClusterSyncedRequest_t; must mirror the operator<< serializer
// field-for-field.
void operator>> ( InputBuffer_c& tIn, ClusterSyncedRequest_t& tReq )
{
	tIn >> (ClusterRequest_t&)tReq;
	tIn.GetBytes ( tReq.m_tGtid.m_tUuid.data(), 16 );
	tReq.m_tGtid.m_iSeqNo = tIn.GetUint64();
	tReq.m_bSendFilesSuccess = !!tIn.GetByte();
	tReq.m_sMsg = tIn.GetString();
	tIn >> tReq.m_dIndexes;
}
// API command to remote node to issue cluster synced callback
using ClusterSynced_c = ClusterCommand_T<E_CLUSTER::SYNCED, ClusterSyncedRequest_t>;
// API command to remote node to issue cluster synced callback
bool SendClusterSynced ( const VecAgentDesc_t& dDesc, const ClusterSyncedRequest_t& tRequest )
{
ClusterSynced_c tReq;
auto dNodes = tReq.MakeAgents ( dDesc, ReplicationTimeoutQuery(), tRequest );
return PerformRemoteTasksWrap ( dNodes, tReq, tReq, true );
}
// Remote-side handler of the SYNCED command: parse the request, report the
// cluster name back to the caller, and reply only when ClusterSynced()
// succeeds (no reply is sent on failure).
void ReceiveClusterSynced ( ISphOutputBuffer & tOut, InputBuffer_c & tBuf, CSphString& sCluster )
{
	ClusterSyncedRequest_t tReq;
	ClusterSynced_c::ParseRequest ( tBuf, tReq );
	sCluster = tReq.m_sCluster;

	if ( ClusterSynced ( tReq ) )
		ClusterSynced_c::BuildReply ( tOut );
}
| 2,314
|
C++
|
.cpp
| 59
| 37.644068
| 97
| 0.742883
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
17,013
|
cluster_update_nodes.cpp
|
manticoresoftware_manticoresearch/src/replication/cluster_update_nodes.cpp
|
//
// Copyright (c) 2019-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org
//
#include "cluster_update_nodes.h"
#include "api_command_cluster.h"
#include "common.h"
#include "nodes.h"
// API command to remote node to update nodes by nodes it sees
using ClusterUpdateNodes_c = ClusterCommand_T<E_CLUSTER::UPDATE_NODES, UpdateNodesRequest_t>;
// Serialize UpdateNodesRequest_t: base request plus the node-kind flag packed
// into a single byte. Must mirror operator>> below.
void operator<< ( ISphOutputBuffer& tOut, const UpdateNodesRequest_t& tReq )
{
	tOut << (const ClusterRequest_t&)tReq;
	tOut.SendByte ( static_cast<bool> ( tReq.m_eKindNodes ) ? 1 : 0 );
}
// Deserialize UpdateNodesRequest_t; the single byte is normalized back to the
// two-valued NODES_E enum.
void operator>> ( InputBuffer_c& tIn, UpdateNodesRequest_t& tReq )
{
	tIn >> (ClusterRequest_t&)tReq;
	tReq.m_eKindNodes = (NODES_E)( tIn.GetByte() ? 1 : 0 );
}
bool SendClusterUpdateNodes ( const CSphString& sCluster, NODES_E eNodes, const VecTraits_T<CSphString>& dNodes )
{
ClusterUpdateNodes_c::REQUEST_T tRequest;
tRequest.m_sCluster = sCluster;
tRequest.m_eKindNodes = eNodes;
auto dAgents = ClusterUpdateNodes_c::MakeAgents ( GetDescAPINodes ( dNodes, Resolve_e::SLOW ), ReplicationTimeoutQuery(), tRequest );
// no nodes left seems a valid case
if ( dAgents.IsEmpty() )
return true;
ClusterUpdateNodes_c tReq;
return PerformRemoteTasksWrap ( dAgents, tReq, tReq, true );
}
// Remote-side handler of UPDATE_NODES: parse the request, expose the cluster
// name to the caller and reply only when the local update succeeds.
void ReceiveClusterUpdateNodes ( ISphOutputBuffer& tOut, InputBuffer_c& tBuf, CSphString& sCluster )
{
	UpdateNodesRequest_t tReq;
	ClusterUpdateNodes_c::ParseRequest ( tBuf, tReq );
	sCluster = tReq.m_sCluster;

	if ( ClusterUpdateNodes ( tReq.m_sCluster, tReq.m_eKindNodes ) )
		ClusterUpdateNodes_c::BuildReply ( tOut );
}
| 1,972
|
C++
|
.cpp
| 47
| 40.340426
| 134
| 0.760438
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
17,014
|
cluster_index_add_local.cpp
|
manticoresoftware_manticoresearch/src/replication/cluster_index_add_local.cpp
|
//
// Copyright (c) 2019-2024, Manticore Software LTD (https://manticoresearch.com)
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org
//
#include "cluster_index_add_local.h"
#include "api_command_cluster.h"
#include "cluster_commands.h"
#include "digest_sha1.h"
#include "recv_state.h"
#include <cmath>
// Serialize ClusterIndexAddLocalRequest_t; field order must match the
// operator>> deserializer below.
void operator<< ( ISphOutputBuffer& tOut, const ClusterIndexAddLocalRequest_t& tReq )
{
	tOut << (const ClusterRequest_t&) tReq;
	tOut.SendString ( tReq.m_sIndex.cstr() );
	tOut.SendByte ( (BYTE)tReq.m_eIndex );
	tOut.SendByte ( tReq.m_bSendFilesSuccess );
}
// Human-readable dump of ClusterIndexAddLocalRequest_t for logging.
StringBuilder_c& operator<< ( StringBuilder_c& tOut, const ClusterIndexAddLocalRequest_t& tReq )
{
	tOut << (const ClusterRequest_t&)tReq;
	tOut << "index:" << tReq.m_sIndex;
	tOut << "type:" << (BYTE)tReq.m_eIndex;
	tOut << "SendFilesSuccess:" << tReq.m_bSendFilesSuccess;
	return tOut;
}
// Deserialize ClusterIndexAddLocalRequest_t; mirrors operator<< above.
void operator>> ( MemInputBuffer_c& tIn, ClusterIndexAddLocalRequest_t& tReq )
{
	tIn >> (ClusterRequest_t&)tReq;
	tReq.m_sIndex = tIn.GetString();
	tReq.m_eIndex = (IndexType_e)tIn.GetByte();
	tReq.m_bSendFilesSuccess = !!tIn.GetByte();
}
// Pair of file lists tracked while swapping an index on disk:
// m_dOld - files of the previous index version, m_dRef - files of the
// freshly received/loaded index.
struct FilesTrait_t
{
	StrVec_t m_dOld;
	StrVec_t m_dRef;

	void Reset()
	{
		m_dOld.Reset();
		m_dRef.Reset();
	}
};
// RAII guard that releases the per-(cluster,index) receive state keyed by
// uState when the enclosing scope exits.
struct ScopedState_t
{
	explicit ScopedState_t ( uint64_t uState )
		: m_uState ( uState )
	{}
	~ScopedState_t() { RecvState::Free ( m_uState ); }
	uint64_t m_uState = 0;
};
// RAII rollback: on destruction renames every m_dOld[i] back to m_dRef[i].
// Callers clear m_tFiles (via Reset) once the new index is committed, which
// turns the destructor into a no-op.
struct RollbackFilesGuard_t
{
	RollbackFilesGuard_t() = default;
	~RollbackFilesGuard_t()
	{
		assert ( m_tFiles.m_dOld.GetLength() == m_tFiles.m_dRef.GetLength() );
		ARRAY_FOREACH ( i, m_tFiles.m_dOld )
		{
			if ( sph::rename ( m_tFiles.m_dOld[i].cstr(), m_tFiles.m_dRef[i].cstr() ) != 0 )
				sphWarning ( "rollback rename %s to %s failed, error %s (%d)", m_tFiles.m_dOld[i].cstr(), m_tFiles.m_dRef[i].cstr(), strerrorm ( errno ), errno );
		}
	}

	FilesTrait_t m_tFiles;
};
// Unlink every file from dFiles that is neither listed in hActiveFiles nor
// already gone from disk; failures are only logged.
static void RemoveFiles ( const sph::StringSet& hActiveFiles, const StrVec_t& dFiles )
{
	for ( const auto& sFile : dFiles )
	{
		// skip files still referenced by the live index and files already removed
		if ( hActiveFiles[sFile] || !sphFileExists ( sFile.cstr() ) )
			continue;

		if ( ::unlink ( sFile.cstr() ) )
			sphWarning ( "failed to unlink file %s, error %s (%d)", sFile.cstr(), strerrorm ( errno ), errno );
	}
}
static void RemoveFiles ( const FilesTrait_t& tIndexFiles, const StrVec_t& dRenamedOld )
{
sph::StringSet hActiveFiles;
for ( const auto& sFile : tIndexFiles.m_dRef )
hActiveFiles.Add ( sFile );
RemoveFiles ( hActiveFiles, tIndexFiles.m_dOld );
RemoveFiles ( hActiveFiles, dRenamedOld );
}
// Rotate received .new files into place: fully-matched files (merge-mask bit
// set) are kept as-is; for the rest, existing files are renamed to .old and
// the .new files renamed over them, with rollback on partial failure.
// On success tFilesGuard receives the old/ref rename pairs so the caller's
// RollbackFilesGuard_t can undo the rotation if a later step fails.
static bool RotateFiles ( const std::unique_ptr<MergeState_t>& pState, FilesTrait_t& tFilesGuard )
{
	assert ( pState );
	int iMatched = 0;
	for ( int i = 0; i < pState->m_dFilesNew.GetLength(); i++ )
		iMatched += ( pState->m_dMergeMask.BitGet ( i ) ? 1 : 0 );

	// all matched files - no .new files these should be rotated at joiner
	// using joiner files as is
	if ( iMatched == pState->m_dFilesNew.GetLength() )
		return true;

	// rename index files to old
	StrVec_t dFilesRef2Old;
	StrVec_t dFilesOld;
	// rename new to index files
	StrVec_t dFilesNew;
	StrVec_t dFilesNew2Ref;

	for ( int iFile = 0; iFile < pState->m_dFilesNew.GetLength(); iFile++ )
	{
		// keep joiner file as is if whole file matched
		if ( pState->m_dMergeMask.BitGet ( iFile ) )
			continue;

		const CSphString& sNameNew = pState->m_dFilesNew[iFile];
		const CSphString& sNameRef = pState->m_dFilesRef[iFile];
		assert ( sphFileExists ( sNameNew.cstr() ) );

		dFilesNew.Add ( sNameNew );
		dFilesNew2Ref.Add ( sNameRef );
		// only files that already exist need an .old backup
		if ( sphFileExists ( sNameRef.cstr(), nullptr ) )
		{
			dFilesRef2Old.Add ( sNameRef );
			dFilesOld.Add().SetSprintf ( "%s.old", sNameRef.cstr() );
		}
	}

	TLS_MSG_STRING ( sError );
	if ( !RenameWithRollback ( dFilesRef2Old, dFilesOld, sError ) )
		return false;

	if ( !RenameWithRollback ( dFilesNew, dFilesNew2Ref, sError ) )
	{
		// second stage failed: restore the .old backups before bailing out
		RenameFiles ( dFilesOld, dFilesRef2Old, sError );
		return false;
	}

	tFilesGuard.m_dOld.SwapData ( dFilesOld );
	tFilesGuard.m_dRef.SwapData ( dFilesRef2Old );
	return true;
}
// Load an index from disk files for cluster use by synthesizing a minimal
// config section (path + type + dummy RT schema) and running the regular
// AddIndex/Prealloc path. On success returns the served index and fills
// tIndexFiles.m_dRef with its file list; returns an empty ref-ptr on failure
// (error goes to TLS message).
static ServedIndexRefPtr_c LoadNewIndex ( const CSphString & sIndexPath, IndexType_e eIndexType, const char * szIndexName, FilesTrait_t & tIndexFiles )
{
	CSphConfigSection hIndex;
	hIndex.Add ( CSphVariant ( sIndexPath.cstr() ), "path" );
	hIndex.Add ( CSphVariant ( GetIndexTypeName ( eIndexType ) ), "type" );
	// dummy schema entries - the real schema comes from the on-disk files
	hIndex.Add ( CSphVariant ( "text" ), "rt_field" );
	hIndex.Add ( CSphVariant ( "gid" ), "rt_attr_uint" );

	TLS_MSG_STRING ( sError );
	ServedIndexRefPtr_c pResult;
	auto [eAdd, pNewServed] = AddIndex ( szIndexName, hIndex, false, true, nullptr, sError );
	assert ( eAdd == ADD_NEEDLOAD || eAdd == ADD_ERROR );
	if ( eAdd != ADD_NEEDLOAD )
		return pResult;

	assert ( pNewServed );
	StrVec_t dWarnings;
	bool bPrealloc = PreallocNewIndex ( *pNewServed, &hIndex, szIndexName, dWarnings, sError );
	if ( !bPrealloc )
		return pResult;

	// NOTE(review): m_dRef is passed as both arguments here (same as the
	// m_dOld/m_dOld call in LoadIndex) - presumably both output lists should
	// target the same vector; confirm against GetIndexFiles' signature
	UnlockedHazardIdxFromServed ( *pNewServed )->GetIndexFiles ( tIndexFiles.m_dRef, tIndexFiles.m_dRef );

	for ( const auto& i : dWarnings )
		sphWarning ( "table '%s': %s", szIndexName, i.cstr() );

	pResult = std::move ( pNewServed );
	return pResult;
}
// load index into daemon
static bool LoadAndReplaceIndex ( const CSphString& sIndexPath, IndexType_e eIndexType, const CSphString & sIndexName, FilesTrait_t & tIndexFiles )
{
auto pNewIndex = LoadNewIndex ( sIndexPath, eIndexType, sIndexName.cstr(), tIndexFiles );
if ( !pNewIndex )
return false;
g_pLocalIndexes->AddOrReplace ( pNewIndex, sIndexName );
return true;
}
// Load index into daemon. If a mutable index with this name already exists,
// prohibit it from saving first (its disk files were just overwritten with
// fresh data from the remote node) and remember its file list in
// tIndexFiles.m_dOld for later cleanup.
static bool LoadIndex ( const CSphString & sIndexPath, IndexType_e eIndexType, const CSphString & sIndexName, FilesTrait_t & tIndexFiles )
{
	cServedIndexRefPtr_c pOldIndex = GetServed ( sIndexName );
	if ( !ServedDesc_t::IsMutable ( pOldIndex ) )
		return LoadAndReplaceIndex ( sIndexPath, eIndexType, sIndexName.cstr(), tIndexFiles );

	WIdx_T<RtIndex_i*> pIndex { pOldIndex };
	// stop the old index from flushing over the freshly received files
	pIndex->ProhibitSave();
	pIndex->GetIndexFiles ( tIndexFiles.m_dOld, tIndexFiles.m_dOld );
	return LoadAndReplaceIndex ( sIndexPath, eIndexType, sIndexName.cstr(), tIndexFiles );
}
// Joiner-side core of CLUSTER_INDEX_ADD_LOCAL: flush the receive state,
// rotate received files into place, optionally verify their SHA1, then load
// the index and attach it to the cluster. On any failure before commit the
// RollbackFilesGuard_t restores the previous on-disk files.
static bool AddReceivedIndex ( const ClusterIndexAddLocalRequest_t& tAddCmd )
{
	uint64_t uKey = DoubleStringKey ( tAddCmd.m_sCluster, tAddCmd.m_sIndex );
	if ( !RecvState::HasState ( uKey ) )
		return TlsMsg::Err ( "missed writer state at joiner node for cluster '%s' table '%s'", tAddCmd.m_sCluster.cstr(), tAddCmd.m_sIndex.cstr() );

	// free the receive state on every exit path
	ScopedState_t tStateGuard ( uKey );
	std::unique_ptr<MergeState_t> pMerge { RecvState::GetState ( uKey ).Flush () };

	if ( tAddCmd.m_eIndex!=IndexType_e::PERCOLATE && tAddCmd.m_eIndex!=IndexType_e::RT )
		return TlsMsg::Err ( "unsupported type '%s' in index '%s'", GetIndexTypeName ( tAddCmd.m_eIndex ), tAddCmd.m_sIndex.cstr() );

	sphLogDebugRpl ( "rotating table '%s' content from %s", tAddCmd.m_sIndex.cstr(), pMerge->m_sIndexPath.cstr() );

	RollbackFilesGuard_t tFilesGuard;
	if ( !RotateFiles ( pMerge, tFilesGuard.m_tFiles ) )
		return false;

	// verify whole index only in case debug replication
#ifndef NDEBUG
	//	if ( g_eLogLevel>=SPH_LOG_RPL_DEBUG )
	{
		sphLogDebugRpl ( "verify table '%s' from %s", tAddCmd.m_sIndex.cstr(), pMerge->m_sIndexPath.cstr() );
		// check that size matched and sha1 matched prior to loading index
		if ( !SyncSigVerify ( pMerge->m_dFilesRef, pMerge->m_dHashes ) )
			return false;
	}
#endif

	sphLogDebugRpl ( "%s table '%s' into cluster '%s' from %s", ( tAddCmd.m_bSendFilesSuccess ? "loading" : "rolling-back" ), tAddCmd.m_sIndex.cstr(), tAddCmd.m_sCluster.cstr(), pMerge->m_sIndexPath.cstr() );

	// donor reported a failed transfer: return true but keep tFilesGuard armed,
	// so its destructor rolls the files back to the old index
	if ( !tAddCmd.m_bSendFilesSuccess )
		return true;

	FilesTrait_t tIndexFiles;
	if ( !LoadIndex ( pMerge->m_sIndexPath, tAddCmd.m_eIndex, tAddCmd.m_sIndex, tIndexFiles ) )
		return TlsMsg::Err ( "failed to load index '%s'", tAddCmd.m_sIndex.cstr() );

	if ( !AddLoadedIndexIntoCluster ( tAddCmd.m_sCluster, tAddCmd.m_sIndex ) )
		return false;

	// keep: only files that donor has
	// remove:
	// - index files renamed to .old
	// - the old index files as the old index could have more files when the new index
	RemoveFiles ( tIndexFiles, tFilesGuard.m_tFiles.m_dOld );

	// clean rollback files lists - commit point, disarm the rollback guard
	tFilesGuard.m_tFiles.Reset();
	return true;
}
// API entry for CLUSTER_INDEX_ADD_LOCAL: parse the request, expose the
// cluster name, and reply only when the received index was applied.
void ReceiveClusterIndexAddLocal ( ISphOutputBuffer& tOut, InputBuffer_c& tBuf, CSphString& sCluster )
{
	ClusterIndexAddLocalRequest_t tReq;
	ClusterIndexAddLocal_c::ParseRequest ( tBuf, tReq );
	sCluster = tReq.m_sCluster;

	if ( AddReceivedIndex ( tReq ) )
		ClusterIndexAddLocal_c::BuildReply ( tOut );
}
| 9,304
|
C++
|
.cpp
| 231
| 38.060606
| 205
| 0.725438
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
17,015
|
serialize.cpp
|
manticoresoftware_manticoresearch/src/replication/serialize.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org
//
#include "sphinx.h"
#include "sphinxstd.h"
#include "searchdaemon.h"
#include "accumulator.h"
#include "serialize.h"
// Serialize an attribute-update command for replication. Field order must
// stay in lock-step with LoadAttrUpdate below.
void SaveAttrUpdate ( const CSphAttrUpdate & tUpd, MemoryWriter_c & tWriter )
{
	tWriter.PutByte ( tUpd.m_bIgnoreNonexistent );
	tWriter.PutByte ( tUpd.m_bStrict );

	tWriter.PutDword ( tUpd.m_dAttributes.GetLength() );
	for ( const TypedAttribute_t& tElem : tUpd.m_dAttributes )
	{
		tWriter.PutString ( tElem.m_sName );
		tWriter.PutDword ( tElem.m_eType );
	}

	SaveArray ( tUpd.m_dDocids, tWriter );
	SaveArray ( tUpd.m_dRowOffset, tWriter );
	SaveArray ( tUpd.m_dPool, tWriter );
	SaveArray ( tUpd.m_dBlobs, tWriter );
}
// Deserialize an attribute-update command (mirror of SaveAttrUpdate).
// Sets bBlob when any updated attribute lives in the blob pool (MVA, JSON,
// string). Returns the number of bytes consumed from pBuf.
int LoadAttrUpdate ( const BYTE * pBuf, int iLen, CSphAttrUpdate & tUpd, bool & bBlob )
{
	MemoryReader_c tIn ( pBuf, iLen );

	tUpd.m_bIgnoreNonexistent = !!tIn.GetVal<BYTE>();
	tUpd.m_bStrict = !!tIn.GetVal<BYTE>();
	bBlob = false;

	tUpd.m_dAttributes.Resize ( tIn.GetDword() );
	for ( TypedAttribute_t& tElem : tUpd.m_dAttributes )
	{
		tElem.m_sName = tIn.GetString();
		tElem.m_eType = (ESphAttr)tIn.GetDword();
		bBlob |= ( tElem.m_eType == SPH_ATTR_UINT32SET || tElem.m_eType == SPH_ATTR_INT64SET || tElem.m_eType == SPH_ATTR_JSON || tElem.m_eType == SPH_ATTR_STRING );
	}

	GetArray ( tUpd.m_dDocids, tIn );
	GetArray ( tUpd.m_dRowOffset, tIn );
	GetArray ( tUpd.m_dPool, tIn );
	GetArray ( tUpd.m_dBlobs, tIn );

	return tIn.GetPos();
}
// Bit flags packing CSphFilterSettings booleans into one DWORD on the wire
// (see SaveFilter/LoadFilter below).
enum
{
	FILTER_FLAG_EXCLUDE			= 1UL << 0,
	FILTER_FLAG_HAS_EQUAL_MIN	= 1UL << 1,
	FILTER_FLAG_HAS_EQUAL_MAX	= 1UL << 2,
	FILTER_FLAG_OPEN_LEFT		= 1UL << 3,
	FILTER_FLAG_OPEN_RIGHT		= 1UL << 4,
	FILTER_FLAG_IS_NULL			= 1UL << 5
};
namespace {
// Serialize one filter; the boolean members are packed into a flags DWORD.
// Field order must mirror LoadFilter below.
void SaveFilter ( const CSphFilterSettings & tItem, MemoryWriter_c & tWriter )
{
	tWriter.PutString ( tItem.m_sAttrName );

	DWORD uFlags = 0;
	uFlags |= FILTER_FLAG_EXCLUDE * tItem.m_bExclude;
	uFlags |= FILTER_FLAG_HAS_EQUAL_MIN * tItem.m_bHasEqualMin;
	uFlags |= FILTER_FLAG_HAS_EQUAL_MAX * tItem.m_bHasEqualMax;
	uFlags |= FILTER_FLAG_OPEN_LEFT * tItem.m_bOpenLeft;
	uFlags |= FILTER_FLAG_OPEN_RIGHT * tItem.m_bOpenRight;
	uFlags |= FILTER_FLAG_IS_NULL * tItem.m_bIsNull;
	tWriter.PutDword ( uFlags );

	tWriter.PutByte ( tItem.m_eType );
	tWriter.PutByte ( tItem.m_eMvaFunc );
	tWriter.PutUint64 ( tItem.m_iMinValue );
	tWriter.PutUint64 ( tItem.m_iMaxValue );
	SaveArray ( tItem.GetValues(), tWriter );
	SaveArray ( tItem.m_dStrings, tWriter );
	tWriter.PutDword ( 0 ); // legacy N of external values, now always 0
}
// Deserialize one filter; mirror of SaveFilter. The trailing "external
// values" array is a legacy field - new writers always emit it empty.
void LoadFilter ( CSphFilterSettings & tItem, MemoryReader_c & tReader )
{
	tItem.m_sAttrName = tReader.GetString();

	DWORD uFlags = tReader.GetDword();
	tItem.m_bExclude = !!( uFlags & FILTER_FLAG_EXCLUDE );
	tItem.m_bHasEqualMin = !!( uFlags & FILTER_FLAG_HAS_EQUAL_MIN );
	tItem.m_bHasEqualMax = !!( uFlags & FILTER_FLAG_HAS_EQUAL_MAX );
	tItem.m_bOpenLeft = !!( uFlags & FILTER_FLAG_OPEN_LEFT );
	tItem.m_bOpenRight = !!( uFlags & FILTER_FLAG_OPEN_RIGHT );
	tItem.m_bIsNull = !!( uFlags & FILTER_FLAG_IS_NULL );

	tItem.m_eType = (ESphFilter)tReader.GetVal<BYTE>();
	tItem.m_eMvaFunc = (ESphMvaFunc)tReader.GetVal<BYTE>();
	tItem.m_iMinValue = tReader.GetVal<uint64_t>();
	tItem.m_iMaxValue = tReader.GetVal<uint64_t>();
	GetArray ( tItem.m_dValues, tReader );
	GetArray ( tItem.m_dStrings, tReader );

	CSphVector<SphAttr_t> dOtherValues;
	GetArray ( dOtherValues, tReader ); // expected to be empty
	// legacy pass - extra values, just push them as plain values here.
	// NOTE(review): the swap REPLACES m_dValues with the legacy values rather
	// than appending them - presumably old writers sent values only in the
	// legacy field, so m_dValues is empty here; confirm against old senders
	if ( !dOtherValues.IsEmpty() )
		std::swap ( tItem.m_dValues, dOtherValues );
}
}
void SaveUpdate ( const CSphQuery & tQuery, MemoryWriter_c & tWriter )
{
tWriter.PutString ( tQuery.m_sQuery );
tWriter.PutDword ( tQuery.m_dFilters.GetLength ());
for ( const CSphFilterSettings & tItem : tQuery.m_dFilters )
::SaveFilter ( tItem, tWriter );
SaveArray ( tQuery.m_dFilterTree, tWriter );
}
int LoadUpdate ( const BYTE * pBuf, int iLen, CSphQuery & tQuery )
{
MemoryReader_c tReader ( pBuf, iLen );
tQuery.m_sQuery = tReader.GetString ();
tQuery.m_dFilters.Resize ( tReader.GetDword ());
for ( CSphFilterSettings & tItem : tQuery.m_dFilters )
::LoadFilter ( tItem, tReader );
GetArray ( tQuery.m_dFilterTree, tReader );
return tReader.GetPos ();
}
// commands version (commands these got replicated via Galera)
// ver 0x104 added docstore from RT index
// ver 0x105 fixed CSphWordHit serialization - instead of direct raw blob copy only fields sent (16 bytes vs 24)
// ver 0x106 add total indexed bytes to accum
// ver 0x107 add blobs vector to replicate update statement
// ver 0x108 gtid is sent and parsed as blob (was string)
// ver 0x109 indexes support for ALTER ADD \ DROP table
static constexpr WORD VER_COMMAND_REPLICATE = 0x109;
// Parse the common replication command header (command id, protocol version,
// index name) into pCmd. Rejects unknown command ids and commands written by
// a NEWER protocol version; older versions are accepted for compatibility.
// On failure returns false with the reason in the TLS message.
bool LoadCmdHeader( MemoryReader_c& tReader, ReplicationCommand_t* pCmd )
{
	TlsMsg::ResetErr();
	auto eCommand = (ReplCmd_e) tReader.GetVal<WORD> ();
	if ( eCommand<ReplCmd_e::PQUERY_ADD || eCommand>ReplCmd_e::TOTAL )
		return TlsMsg::Err ( "bad replication command %d", (int) eCommand );

	pCmd->m_uVersion = tReader.GetVal<WORD> ();
	if ( pCmd->m_uVersion>VER_COMMAND_REPLICATE )
		return TlsMsg::Err ( "replication command %d, version mismatch %d, got %d", (int) eCommand, VER_COMMAND_REPLICATE, (int)pCmd->m_uVersion );

	pCmd->m_eCommand = eCommand;
	pCmd->m_sIndex = tReader.GetString ();
	return true;
}
// Write the common replication command header; mirror of LoadCmdHeader.
// Always stamps the current protocol version.
void SaveCmdHeader ( const ReplicationCommand_t & tCmd, MemoryWriter_c & tWriter )
{
	tWriter.PutWord ((WORD) tCmd.m_eCommand );
	tWriter.PutWord ( VER_COMMAND_REPLICATE );
	tWriter.PutString ( tCmd.m_sIndex );
}
// Expose the current replication protocol version to other translation units.
WORD GetVerCommandReplicate()
{
	return VER_COMMAND_REPLICATE;
}
| 6,056
|
C++
|
.cpp
| 151
| 38.211921
| 159
| 0.728511
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
17,016
|
configuration.cpp
|
manticoresoftware_manticoresearch/src/replication/configuration.cpp
|
//
// Copyright (c) 2019-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org
//
#include "portrange.h"
#include "searchdaemon.h"
#include "searchdha.h"
static constexpr int g_iDefaultPortBias = 10;
static constexpr int g_iDefaultPortRange = 200;
// incoming address guessed (false) or set via searchd.node_address
bool g_bHasIncoming = false;
// incoming IP part of address set by searchd.node_address or took from listener
static CSphString g_sIncomingIP;
// incoming address (IP:port from API listener) used for request to this node from other daemons
static CSphString g_sIncomingApiPoint;
// listen IP part of address for Galera
static CSphString g_sListenReplicationIP;
// setup IP, ports and node incoming address
bool SetReplicationListener ( const VecTraits_T<ListenerDesc_t> & dListeners, CSphString & sError )
{
bool bGotReplicationPorts = false;
for ( const ListenerDesc_t& tListen : dListeners )
{
if ( tListen.m_eProto!=Proto_e::REPLICATION )
continue;
const bool bBadCount = ( tListen.m_iPortsCount<2 );
const bool bBadRange = ( ( tListen.m_iPortsCount%2 )!=0 && ( tListen.m_iPortsCount-1 )<2 );
if ( bBadCount || bBadRange )
{
sphWarning ( "invalid replication ports count %d, should be at least 2", tListen.m_iPortsCount );
continue;
}
// can not use 0.0.0.0 due to Galera error at ReplicatorSMM::InitConfig::InitConfig
if ( tListen.m_uIP != 0 )
{
std::array<char, SPH_ADDRESS_SIZE> sListenerIP {};
sphFormatIP ( sListenerIP.data(), SPH_ADDRESS_SIZE, tListen.m_uIP );
if ( g_sListenReplicationIP.IsEmpty() )
{
g_sListenReplicationIP = sListenerIP.data();
PortRange::AddAddr ( g_sListenReplicationIP );
} else if ( g_sListenReplicationIP != (const char*)sListenerIP.data() )
{
sphWarning ( "multiple replication IP ('%s') found but only 1st IP '%s' used", sListenerIP.data(), g_sListenReplicationIP.cstr() );
}
} else
{
g_sListenReplicationIP = g_bHasIncoming ? g_sIncomingIP : "127.0.0.1";
if ( g_sIncomingIP.IsEmpty() )
sphWarning ( "can not set '0.0.0.0' as Galera IP, '%s' used", g_sListenReplicationIP.cstr() );
else
sphLogDebugRpl ( "set '%s' as Galera IP", g_sListenReplicationIP.cstr() );
}
PortRange::AddPortsRange (tListen.m_iPort, tListen.m_iPortsCount);
bGotReplicationPorts = true;
}
int iAPIPort = dListeners.GetFirst ( [&] ( const ListenerDesc_t & tListen ) { return tListen.m_eProto==Proto_e::SPHINX; } );
if ( iAPIPort==-1 )
{
sError = "no 'listen' is found, cannot set incoming addresses, replication is disabled";
return false;
}
if ( !bGotReplicationPorts )
{
const ListenerDesc_t& tListen = dListeners[iAPIPort];
if ( tListen.m_uIP != 0 )
{
std::array<char, SPH_ADDRESS_SIZE> sListenerIP {};
sphFormatIP ( sListenerIP.data(), SPH_ADDRESS_SIZE, tListen.m_uIP );
g_sListenReplicationIP = sListenerIP.data();
} else
{
g_sListenReplicationIP = "127.0.0.1";
}
PortRange::AddPortsRange ( tListen.m_iPort + g_iDefaultPortBias, g_iDefaultPortRange );
PortRange::AddAddr ( g_sListenReplicationIP );
}
if ( !g_bHasIncoming )
g_sIncomingIP = g_sListenReplicationIP;
sphLogDebugRpl ( "listens: Galera '%s', own '%s:%d'", g_sListenReplicationIP.cstr(), g_sIncomingIP.cstr(), dListeners[iAPIPort].m_iPort );
g_sIncomingApiPoint.SetSprintf ( "%s:%d", g_sIncomingIP.cstr(), dListeners[iAPIPort].m_iPort );
if ( !IsConfigless() )
sError = "data_dir option is missing in config, replication is disabled";
return IsConfigless();
}
void ReplicationSetIncoming ( CSphString sIncoming )
{
g_sIncomingIP = sIncoming;
g_bHasIncoming = !g_sIncomingIP.IsEmpty();
}
const char* szIncomingIP()
{
return g_sIncomingIP.cstr();
}
bool HasIncoming() noexcept
{
return g_bHasIncoming;
}
const char* szListenReplicationIP()
{
return g_sListenReplicationIP.cstr();
}
const char* szIncomingProto()
{
return g_sIncomingApiPoint.cstr();
}
bool MyIncomingApiAddrBeginsWith ( const char* szHost )
{
return g_sIncomingApiPoint.Begins ( szHost );
}
| 4,404
|
C++
|
.cpp
| 117
| 35.222222
| 139
| 0.73424
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
17,017
|
cluster_delete.cpp
|
manticoresoftware_manticoresearch/src/replication/cluster_delete.cpp
|
//
// Copyright (c) 2019-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org
//
#include "cluster_delete.h"
#include "cluster_commands.h"
#include "api_command_cluster.h"
#include "common.h"
#include "nodes.h"
// API command to remote node to delete cluster
using ClusterDelete_c = ClusterCommand_T<E_CLUSTER::DELETE_>;
void ReceiveClusterDelete ( ISphOutputBuffer & tOut, InputBuffer_c & tBuf, CSphString& sCluster )
{
ClusterRequest_t tCmd;
ClusterDelete_c::ParseRequest ( tBuf, tCmd );
sCluster = tCmd.m_sCluster;
TLS_MSG_STRING ( sError );
if ( ClusterDelete ( sCluster ) && SaveConfigInt ( sError ) )
ClusterDelete_c::BuildReply ( tOut );
}
void SendClusterDeleteToNodes ( const VecTraits_T<CSphString>& dNodes, const CSphString& sCluster )
{
if ( dNodes.IsEmpty() )
return;
ClusterRequest_t tData { sCluster };
ClusterDelete_c tReq;
auto dAgents = tReq.MakeAgents ( GetDescAPINodes ( dNodes, Resolve_e::SLOW ), ReplicationTimeoutQuery(), tData );
PerformRemoteTasksWrap ( dAgents, tReq, tReq, true );
}
| 1,425
|
C++
|
.cpp
| 36
| 38
| 114
| 0.756151
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
17,018
|
cluster_file_reserve.cpp
|
manticoresoftware_manticoresearch/src/replication/cluster_file_reserve.cpp
|
//
// Copyright (c) 2019-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org
//
#include "cluster_file_reserve.h"
#include "cluster_commands.h"
#include "api_command_cluster.h"
#include "searchdreplication.h"
#include "digest_sha1.h"
#include "send_files.h"
#include "recv_state.h"
#include <cmath>
void GetArray ( StrVec_t& dBuf, InputBuffer_c& tIn )
{
int iCount = tIn.GetInt();
if ( !iCount )
return;
dBuf.Resize ( iCount );
for ( CSphString& sVal : dBuf )
sVal = tIn.GetString();
}
void GetArray ( FixedStrVec_t& dBuf, InputBuffer_c& tIn )
{
int iCount = tIn.GetInt();
if ( !iCount )
return;
dBuf.Reset ( iCount );
for ( CSphString& sVal : dBuf )
sVal = tIn.GetString();
}
template<typename T>
void GetArray ( CSphFixedVector<T>& dBuf, InputBuffer_c& tIn )
{
int iCount = tIn.GetInt();
if ( !iCount )
return;
dBuf.Reset ( iCount );
tIn.GetBytes ( dBuf.Begin(), (int)dBuf.GetLengthBytes() );
}
template<typename T>
void GetArray ( CSphVector<T>& dBuf, InputBuffer_c& tIn )
{
int iCount = tIn.GetInt();
if ( !iCount )
return;
dBuf.Resize ( iCount );
tIn.GetBytes ( dBuf.Begin(), (int)dBuf.GetLengthBytes() );
}
template<typename T>
void SendArray ( const VecTraits_T<T>& dBuf, ISphOutputBuffer& tOut )
{
tOut.SendInt ( dBuf.GetLength() );
if ( dBuf.GetLength() )
tOut.SendBytes ( dBuf.Begin(), dBuf.GetLengthBytes() );
}
void SendArray ( const VecTraits_T<CSphString>& dBuf, ISphOutputBuffer& tOut )
{
tOut.SendInt ( dBuf.GetLength() );
for ( const CSphString& sVal : dBuf )
tOut.SendString ( sVal.cstr() );
}
void operator<< ( ISphOutputBuffer& tOut, const FileReserveRequest_t& tReq )
{
tOut << (const ClusterRequest_t&) tReq;
tOut.SendString ( tReq.m_sIndex.cstr() );
tOut.SendString ( tReq.m_sIndexFileName.cstr() );
assert ( tReq.m_pChunks );
const SyncSrc_t* pSrc = tReq.m_pChunks;
SendArray ( pSrc->m_dBaseNames, tOut );
SendArray ( pSrc->m_dChunks, tOut );
SendArray ( pSrc->m_dHashes, tOut );
}
StringBuilder_c& operator<< ( StringBuilder_c& tOut, const FileReserveRequest_t& tReq )
{
const SyncSrc_t* pSrc = tReq.m_pChunks;
tOut << (const ClusterRequest_t&)tReq
<< "index" << tReq.m_sIndex << "indexfilename" << tReq.m_sIndexFileName
<< "basenames:" << pSrc->m_dBaseNames.GetLength()
<< "chunks:" << pSrc->m_dChunks.GetLength()
<< "hashes:" << pSrc->m_dHashes.GetLength();
return tOut;
}
void operator>> ( InputBuffer_c& tIn, FileReserveRequest_t& tReq )
{
tIn >> (ClusterRequest_t&)tReq;
tReq.m_sIndex = tIn.GetString();
tReq.m_sIndexFileName = tIn.GetString();
assert ( tReq.m_pChunks );
SyncSrc_t* pSrc = tReq.m_pChunks;
GetArray ( pSrc->m_dBaseNames, tIn );
GetArray ( pSrc->m_dChunks, tIn );
GetArray ( pSrc->m_dHashes, tIn );
}
void operator<< ( ISphOutputBuffer& tOut, const FileReserveReply_t& tReq )
{
tOut.SendByte ( tReq.m_bIndexActive );
SendArray ( tReq.m_dRemotePaths, tOut );
tOut.SendInt ( tReq.m_dNodeChunksMask.GetSize() );
tOut.SendBytes ( tReq.m_dNodeChunksMask.Begin(), tReq.m_dNodeChunksMask.GetSizeBytes() );
tOut.SendUint64 ( tReq.m_tmTimeout );
tOut.SendUint64 ( tReq.m_tmTimeoutFile );
}
StringBuilder_c& operator<< ( StringBuilder_c& tOut, const FileReserveReply_t& tReq )
{
tOut << "index active:" << (tReq.m_bIndexActive?"yes":"no")
<< "remote paths:" << tReq.m_dRemotePaths
<< "mask size:" << tReq.m_dNodeChunksMask.GetSize()
<< "timeout:"<< tReq.m_tmTimeout
<< "timeout file:" << tReq.m_tmTimeoutFile;
return tOut;
}
void operator>> ( InputBuffer_c& tIn, FileReserveReply_t& tReq )
{
tReq.m_bIndexActive = !!tIn.GetByte();
GetArray ( tReq.m_dRemotePaths, tIn );
int iBits = tIn.GetInt();
tReq.m_dNodeChunksMask.Init ( iBits );
tIn.GetBytes ( tReq.m_dNodeChunksMask.Begin(), tReq.m_dNodeChunksMask.GetSizeBytes() );
tReq.m_tmTimeout = (int64_t)tIn.GetUint64();
tReq.m_tmTimeoutFile = (int64_t)tIn.GetUint64();
}
struct ScopedFilesRemoval_t: public ISphNoncopyable
{
explicit ScopedFilesRemoval_t ( VecTraits_T<CSphString>* pFiles )
: m_pFiles ( pFiles )
{}
~ScopedFilesRemoval_t()
{
if ( m_pFiles )
{
for ( const CSphString& sFile : *m_pFiles )
{
if ( !sFile.IsEmpty() && sphFileExists ( sFile.cstr() ) )
::unlink ( sFile.cstr() );
}
}
}
VecTraits_T<CSphString>* m_pFiles { nullptr };
};
// command at remote node for CLUSTER_FILE_RESERVE to check
// - file could be allocated on disk at cluster path and reserve disk space for a file
// - or make sure that index has exact same index file, ie sha1 matched
bool ClusterFileReserve ( const FileReserveRequest_t & tCmd, FileReserveReply_t & tRes )
{
sphLogDebugRpl ( "reserve table '%s'", tCmd.m_sIndex.cstr() );
int64_t tmStartReserve = sphMicroTimer();
CSphString sLocalIndexPath;
assert ( tCmd.m_pChunks );
// use index path first
{
cServedIndexRefPtr_c pServed = GetServed ( tCmd.m_sIndex );
if ( ServedDesc_t::IsMutable ( pServed ) )
{
tRes.m_bIndexActive = true;
sLocalIndexPath = pServed->m_sIndexPath;
RIdx_T<RtIndex_i*> ( pServed )->ProhibitSave();
}
}
tRes.m_dRemotePaths.Resize ( tCmd.m_pChunks->m_dBaseNames.GetLength ());
// use cluster path as head of index path or existed index path
if ( tRes.m_bIndexActive )
{
CSphString sPathOnly = GetPathOnly ( sLocalIndexPath );
// set index files names into existing index files
ARRAY_FOREACH ( iFile, tCmd.m_pChunks->m_dBaseNames )
{
const CSphString & sFile = tCmd.m_pChunks->m_dBaseNames[iFile];
tRes.m_dRemotePaths[iFile].SetSprintf ( "%s%s", sPathOnly.cstr(), sFile.cstr() );
}
} else
{
auto tIndexPath = GetClusterPath ( tCmd.m_sCluster );
if ( !tIndexPath )
return false;
// index in its own directory
sLocalIndexPath.SetSprintf ( "%s/%s", tIndexPath->cstr(), tCmd.m_sIndexFileName.cstr() );
MkDir ( sLocalIndexPath.cstr() );
// set index files names into cluster folder
ARRAY_FOREACH ( iFile, tCmd.m_pChunks->m_dBaseNames )
{
const CSphString & sFile = tCmd.m_pChunks->m_dBaseNames[iFile];
tRes.m_dRemotePaths[iFile].SetSprintf ( "%s/%s", sLocalIndexPath.cstr(), sFile.cstr() );
}
sLocalIndexPath.SetSprintf ( "%s/%s/%s", tIndexPath->cstr(), tCmd.m_sIndexFileName.cstr(), tCmd.m_sIndexFileName.cstr() );
}
int iBits = tCmd.m_pChunks->m_dChunks.Last().m_iHashStartItem + tCmd.m_pChunks->m_dChunks.Last().GetChunksCount();
tRes.m_dNodeChunksMask.Init ( iBits );
CSphVector<BYTE> dReadBuf;
int64_t tmTimeoutFile = 0;
// check file exists, same size and same hash
ARRAY_FOREACH ( iFile, tRes.m_dRemotePaths )
{
TlsMsg::KeepError_c sError;
const CSphString & sFile = tRes.m_dRemotePaths[iFile];
const FileChunks_t & tFile = tCmd.m_pChunks->m_dChunks[iFile];
if ( sphIsReadable ( sFile ) )
{
int64_t iLen = 0;
{
CSphAutofile tOut ( sFile, SPH_O_READ, sError, false );
if ( tOut.GetFD()<0 )
return false;
iLen = tOut.GetSize();
}
// check only in case size matched
if ( iLen==tFile.m_iFileSize )
{
int64_t tmReadStart = sphMicroTimer();
if ( !VerifyFileHash ( iFile, sFile, *tCmd.m_pChunks, tRes.m_dNodeChunksMask, dReadBuf, sError ) )
return false;
int64_t tmReadDelta = sphMicroTimer() - tmReadStart;
tmTimeoutFile = Max ( tmReadDelta, tmTimeoutFile );
}
}
}
StrVec_t dLocalPaths ( tRes.m_dRemotePaths.GetLength() );
ScopedFilesRemoval_t tFilesCleanup ( &dLocalPaths );
// create files these will be transferred from donor
ARRAY_FOREACH ( iFile, tRes.m_dRemotePaths )
{
// no need to create file for fully matched file
if ( tRes.m_dNodeChunksMask.BitGet ( iFile ) )
continue;
// file content from donor will be saved into a file with a '.new' extension
dLocalPaths[iFile].SetSprintf ( "%s.new", tRes.m_dRemotePaths[iFile].cstr() );
const CSphString& sFile = dLocalPaths[iFile];
const FileChunks_t& tFile = tCmd.m_pChunks->m_dChunks[iFile];
// need to create file with specific size
CSphString sError;
CSphAutofile tOut ( sFile, SPH_O_NEW, sError, false );
if ( tOut.GetFD()<0 )
return TlsMsg::Err ( sError );
int64_t iLen = sphSeek ( tOut.GetFD(), tFile.m_iFileSize, SEEK_SET );
if ( iLen!=tFile.m_iFileSize )
{
if ( iLen<0 )
return TlsMsg::Err ( "error: %d '%s'", errno, strerrorm ( errno ) );
return TlsMsg::Err ( "error, expected: " INT64_FMT ", got " INT64_FMT, tFile.m_iFileSize, iLen );
}
}
tFilesCleanup.m_pFiles = nullptr;
tRes.m_dRemotePaths.SwapData ( dLocalPaths );
tRes.m_tmTimeoutFile = tmTimeoutFile / 1000;
tRes.m_tmTimeout = ( sphMicroTimer() - tmStartReserve ) / 1000;
assert ( !RecvState::HasState ( DoubleStringKey ( tCmd.m_sCluster, tCmd.m_sIndex ) ) );
RecvState::GetState ( DoubleStringKey ( tCmd.m_sCluster, tCmd.m_sIndex ) ).SetMerge ( *tCmd.m_pChunks, tRes, sLocalIndexPath, dLocalPaths );
return true;
}
void ReceiveClusterFileReserve ( ISphOutputBuffer & tOut, InputBuffer_c & tBuf, CSphString& sCluster )
{
SyncSrc_t tSrc;
FileReserveRequest_t tCmd;
tCmd.m_pChunks = &tSrc;
ClusterFileReserve_c::ParseRequest ( tBuf, tCmd );
sCluster = tCmd.m_sCluster;
FileReserveReply_t tRes;
if ( ClusterFileReserve ( tCmd, tRes ) )
ClusterFileReserve_c::BuildReply ( tOut, tRes );
}
bool SendClusterFileReserve ( VecRefPtrs_t<AgentConn_t*>& dAgents )
{
if ( dAgents.IsEmpty() )
return false;
ClusterFileReserve_c tReq;
return PerformRemoteTasksWrap ( dAgents, tReq, tReq, true );
}
| 9,737
|
C++
|
.cpp
| 273
| 33.216117
| 141
| 0.711432
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
17,019
|
wsrep_v25.cpp
|
manticoresoftware_manticoresearch/src/replication/wsrep_v25.cpp
|
//
// Copyright (c) 2024, Manticore Software LTD (https://manticoresearch.com)
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org
//
#include "wsrep_cxx_int.h"
namespace RAW25
{
static const char* INTERFACE_VERSION = "25";
enum class Status_e : int {
OK = 0,
WARNING, // minor warning
TRX_MISSED,
TRX_FAIL,
BRUTEFORCE_ABORT,
SIZE_EXCEEDED,
CONNECTION_FAIL, // must abort
NODE_FAIL, // must reinit
FATAL, // must abort
NOT_IMPL, // not implemented
};
inline const char* GetStatus ( Status_e eStatus ) noexcept
{
switch ( eStatus )
{
case Status_e::OK: return "success";
case Status_e::WARNING: return "warning";
case Status_e::TRX_MISSED: return "transaction is not known";
case Status_e::TRX_FAIL: return "transaction aborted, server can continue";
case Status_e::BRUTEFORCE_ABORT: return "transaction was victim of brute force abort";
case Status_e::SIZE_EXCEEDED: return "data exceeded maximum supported size";
case Status_e::CONNECTION_FAIL: return "error in client connection, must abort";
case Status_e::NODE_FAIL: return "error in node state, must reinit";
case Status_e::FATAL: return "fatal error, server must abort";
case Status_e::NOT_IMPL: return "feature not implemented";
default: return strerror ( static_cast<int> ( eStatus ) );
}
}
// types from wsrep internals
struct Wsrep_t;
// init/deinit
using FnInit = Status_e ( * ) ( Wsrep_t*, const InitArgs_t* );
using FnFree = void ( * ) ( Wsrep_t* );
using FnCapabilities = uint64_t ( * ) ( Wsrep_t* );
// options
using FnOptionsSet = Status_e ( * ) ( Wsrep_t*, const char* );
using FnOptionsGet = char* (*)( Wsrep_t* );
// connect
using FnConnect = Status_e ( * ) ( Wsrep_t*, const char*, const char*, const char*, bool );
using FnDisconnect = Status_e ( * ) ( Wsrep_t* );
// replication
using FnRecv = Status_e ( * ) ( Wsrep_t*, void* );
using FnPreCommit = Status_e ( * ) ( Wsrep_t*, uint64_t, TxHandle_t*, DWORD, Wsrep::TrxMeta_t* );
using FnPostCommit = Status_e ( * ) ( Wsrep_t*, TxHandle_t* );
using FnPostRollback = Status_e ( * ) ( Wsrep_t*, TxHandle_t* );
using FnReplayTrx = Status_e ( * ) ( Wsrep_t*, TxHandle_t*, void* );
using FnAbortPreCommit = Status_e ( * ) ( Wsrep_t*, int64_t, uint64_t );
using FnAppendKey = Status_e ( * ) ( Wsrep_t*, TxHandle_t*, const Key_t*, uint64_t, KeyType_e, bool );
using FnAppendData = Status_e ( * ) ( Wsrep_t*, TxHandle_t*, const Buf_t*, uint64_t, DataType_e, bool );
using FnCasualRead = Status_e ( * ) ( Wsrep_t*, Wsrep::GlobalTid_t* );
using FnFreeConnection = Status_e ( * ) ( Wsrep_t*, uint64_t );
// total order
using FnToExecuteStart = Status_e ( * ) ( Wsrep_t*, uint64_t, const Key_t*, uint64_t, const Buf_t*, uint64_t, Wsrep::TrxMeta_t* );
using FnToExecuteEnd = Status_e ( * ) ( Wsrep_t*, uint64_t );
// preordered
using FnPreorderedCollect = Status_e ( * ) ( Wsrep_t*, PoHandle_t*, const Buf_t*, uint64_t, bool );
using FnPreorderedCommit = Status_e ( * ) ( Wsrep_t*, PoHandle_t*, const Wsrep::UUID_t*, DWORD, int, bool );
// sst
using FnSstSent = Status_e ( * ) ( Wsrep_t*, const Wsrep::GlobalTid_t*, int );
using FnSstReceived = Status_e ( * ) ( Wsrep_t*, const Wsrep::GlobalTid_t*, const void*, uint64_t, int );
using FnSnapshot = Status_e ( * ) ( Wsrep_t*, const void*, uint64_t, const char* );
// stat var
using FnStatsGet = Wsrep::StatsVars_t* (*)( Wsrep_t* );
using FnStatsFree = void ( * ) ( Wsrep_t*, const Wsrep::StatsVars_t* );
using FnStatsReset = void ( * ) ( Wsrep_t* );
// misc
using FnPause = int64_t ( * ) ( Wsrep_t* ); // returns SeqNo
using FnResume = Status_e ( * ) ( Wsrep_t* );
using FnDesync = Status_e ( * ) ( Wsrep_t* );
using FnResync = Status_e ( * ) ( Wsrep_t* );
// lock/unlock
using FnLock = Status_e ( * ) ( Wsrep_t*, const char*, bool, uint64_t, int64_t );
using FnUnlock = Status_e ( * ) ( Wsrep_t*, const char*, uint64_t );
using FnIsLocked = bool ( * ) ( Wsrep_t*, const char*, uint64_t*, Wsrep::UUID_t* );
struct Wsrep_t
{
const char* m_szInterfaceVersion;
FnInit m_fnInit;
FnCapabilities m_fnCapabilities;
FnOptionsSet m_fnOptionsSet;
FnOptionsGet m_fnOptionsGet;
FnConnect m_fnConnect;
FnDisconnect m_fnDisconnect;
FnRecv m_fnRecv;
FnPreCommit m_fnPreCommit;
FnPostCommit m_fnPostCommit;
FnPostRollback m_fnPostRollback;
FnReplayTrx m_fnReplayTrx;
FnAbortPreCommit m_fnAbortPreCommit;
FnAppendKey m_fnAppendKey;
FnAppendData m_fnAppendData;
FnCasualRead m_fnCasualRead;
FnFreeConnection m_fnFreeConnection;
FnToExecuteStart m_fnToExecuteStart;
FnToExecuteEnd m_fnToExecuteEnd;
FnPreorderedCollect m_fnPreorderedCollect;
FnPreorderedCommit m_fnPreorderedCommit;
FnSstSent m_fnSstSent;
FnSstReceived m_fnSstReceived;
FnSnapshot m_fnSnapshot;
FnStatsGet m_fnStatsGet;
FnStatsFree m_fnStatsFree;
FnStatsReset m_fnStatsReset;
FnPause m_fnPause;
FnResume m_fnResume;
FnDesync m_fnDesync;
FnResync m_fnResync;
FnLock m_fnLock;
FnUnlock m_fnUnlock;
FnIsLocked m_fnIsLocked;
const char* m_szName;
const char* m_szVersion;
const char* m_szVendor;
FnFree m_fnFree;
void *m_pDlh, *m_pCtx;
};
// boring wrapper of everything in Raw::Wsrep_c, refcounted
struct WrappedWsrep_t final : public ISphRefcountedMT
{
Wsrep_t m_tWsrep;
std::unique_ptr<CSphDynamicLibrary> m_pWsrepLib;
Status_e Init ( const InitArgs_t* pArgs )
{
if ( !m_tWsrep.m_fnInit )
return Status_e::NOT_IMPL;
return m_tWsrep.m_fnInit ( &m_tWsrep, pArgs );
}
uint64_t Capabilities()
{
assert ( m_tWsrep.m_fnCapabilities );
return m_tWsrep.m_fnCapabilities ( &m_tWsrep );
}
Status_e OptionsSet ( const char* szConf )
{
assert ( m_tWsrep.m_fnOptionsSet );
return m_tWsrep.m_fnOptionsSet ( &m_tWsrep, szConf );
}
char* OptionsGet()
{
assert ( m_tWsrep.m_fnOptionsGet );
return m_tWsrep.m_fnOptionsGet ( &m_tWsrep );
}
Status_e Connect ( const char* szCluster, const char* szClusterUrl, const char* szStateDonor, bool bBootstrap )
{
assert ( m_tWsrep.m_fnConnect );
return m_tWsrep.m_fnConnect ( &m_tWsrep, szCluster, szClusterUrl, szStateDonor, bBootstrap );
}
Status_e Disconnect()
{
assert ( m_tWsrep.m_fnDisconnect );
return m_tWsrep.m_fnDisconnect ( &m_tWsrep );
}
Status_e Recv ( void* pRecvCtx )
{
assert ( m_tWsrep.m_fnRecv );
return m_tWsrep.m_fnRecv ( &m_tWsrep, pRecvCtx );
}
Status_e PreCommit ( uint64_t uConnID, TxHandle_t* pHandle, DWORD uFlags, Wsrep::TrxMeta_t* pMeta )
{
assert ( m_tWsrep.m_fnPreCommit );
return m_tWsrep.m_fnPreCommit ( &m_tWsrep, uConnID, pHandle, uFlags, pMeta );
}
Status_e PostCommit ( TxHandle_t* pHandle )
{
assert ( m_tWsrep.m_fnPostCommit );
return m_tWsrep.m_fnPostCommit ( &m_tWsrep, pHandle );
}
Status_e PostRollback ( TxHandle_t* pHandle )
{
assert ( m_tWsrep.m_fnPostRollback );
return m_tWsrep.m_fnPostRollback ( &m_tWsrep, pHandle );
}
Status_e ReplayTrx ( TxHandle_t* pHandle, void* pTrxCtx )
{
assert ( m_tWsrep.m_fnReplayTrx );
return m_tWsrep.m_fnReplayTrx ( &m_tWsrep, pHandle, pTrxCtx );
}
Status_e AbortPreCommit ( int64_t iSeqNo, uint64_t tVictimTrx )
{
assert ( m_tWsrep.m_fnAbortPreCommit );
return m_tWsrep.m_fnAbortPreCommit ( &m_tWsrep, iSeqNo, tVictimTrx );
}
Status_e AppendKey ( TxHandle_t* pHandle, const Key_t* pKeys, uint64_t iCount, KeyType_e eKeyType, bool bCopy )
{
assert ( m_tWsrep.m_fnAppendKey );
return m_tWsrep.m_fnAppendKey ( &m_tWsrep, pHandle, pKeys, iCount, eKeyType, bCopy );
}
Status_e AppendData ( TxHandle_t* pHandle, const Buf_t* pData, uint64_t iCount, DataType_e eDatatype, bool bCopy )
{
assert ( m_tWsrep.m_fnAppendData );
return m_tWsrep.m_fnAppendData ( &m_tWsrep, pHandle, pData, iCount, eDatatype, bCopy );
}
Status_e CausalRead ( Wsrep::GlobalTid_t* pGtid )
{
assert ( m_tWsrep.m_fnCasualRead );
return m_tWsrep.m_fnCasualRead ( &m_tWsrep, pGtid );
}
Status_e FreeConnection ( uint64_t uConnID )
{
assert ( m_tWsrep.m_fnFreeConnection );
return m_tWsrep.m_fnFreeConnection ( &m_tWsrep, uConnID );
}
Status_e ToExecuteStart ( uint64_t uConnID, const Key_t* pKeys, uint64_t NKeys, const Buf_t* pAction, uint64_t uCount, Wsrep::TrxMeta_t* pMeta )
{
assert ( m_tWsrep.m_fnToExecuteStart );
return m_tWsrep.m_fnToExecuteStart ( &m_tWsrep, uConnID, pKeys, NKeys, pAction, uCount, pMeta );
}
Status_e ToExecuteEnd ( uint64_t uConnID )
{
assert ( m_tWsrep.m_fnToExecuteEnd );
return m_tWsrep.m_fnToExecuteEnd ( &m_tWsrep, uConnID );
}
Status_e PreorderedCollect ( PoHandle_t* pHandle, const Buf_t* pData, uint64_t uCount, bool bCopy )
{
assert ( m_tWsrep.m_fnPreorderedCollect );
return m_tWsrep.m_fnPreorderedCollect ( &m_tWsrep, pHandle, pData, uCount, bCopy );
}
Status_e PreorderedCommit ( PoHandle_t* pHandle, const Wsrep::UUID_t* pSourceId, DWORD uFlags, int iRange, bool bCommit )
{
assert ( m_tWsrep.m_fnPreorderedCommit );
return m_tWsrep.m_fnPreorderedCommit ( &m_tWsrep, pHandle, pSourceId, uFlags, iRange, bCommit );
}
Status_e SstSent ( const Wsrep::GlobalTid_t* state_id, int iCode )
{
assert ( m_tWsrep.m_fnSstSent );
return m_tWsrep.m_fnSstSent ( &m_tWsrep, state_id, iCode );
}
Status_e SstReceived ( const Wsrep::GlobalTid_t* pStateID, const void* pState, uint64_t uLen, int iCode )
{
assert ( m_tWsrep.m_fnSstReceived );
return m_tWsrep.m_fnSstReceived ( &m_tWsrep, pStateID, pState, uLen, iCode );
}
Status_e Snapshot ( const void* pMsg, uint64_t uLen, const char* szDonorSpec )
{
assert ( m_tWsrep.m_fnSnapshot );
return m_tWsrep.m_fnSnapshot ( &m_tWsrep, pMsg, uLen, szDonorSpec );
}
Wsrep::StatsVars_t* StatsGet()
{
assert ( m_tWsrep.m_fnStatsGet );
return m_tWsrep.m_fnStatsGet ( &m_tWsrep );
}
void StatsFree ( Wsrep::StatsVars_t* pVars )
{
assert ( m_tWsrep.m_fnStatsFree );
m_tWsrep.m_fnStatsFree ( &m_tWsrep, pVars );
}
void StatsReset()
{
assert ( m_tWsrep.m_fnStatsReset );
m_tWsrep.m_fnStatsReset ( &m_tWsrep );
}
int64_t Pause()
{
assert ( m_tWsrep.m_fnPause );
return m_tWsrep.m_fnPause ( &m_tWsrep );
}
Status_e Resume()
{
assert ( m_tWsrep.m_fnResume );
return m_tWsrep.m_fnResume ( &m_tWsrep );
}
Status_e Desync()
{
assert ( m_tWsrep.m_fnDesync );
return m_tWsrep.m_fnDesync ( &m_tWsrep );
}
Status_e Resync()
{
assert ( m_tWsrep.m_fnResync );
return m_tWsrep.m_fnResync ( &m_tWsrep );
}
Status_e Lock ( const char* szName, bool bShared, uint64_t uOwner, int64_t uOut )
{
assert ( m_tWsrep.m_fnLock );
return m_tWsrep.m_fnLock ( &m_tWsrep, szName, bShared, uOwner, uOut );
}
Status_e Unlock ( const char* szName, uint64_t uOwner )
{
assert ( m_tWsrep.m_fnUnlock );
return m_tWsrep.m_fnUnlock ( &m_tWsrep, szName, uOwner );
}
bool IsLocked ( const char* szName, uint64_t* pConn, Wsrep::UUID_t* pNode )
{
assert ( m_tWsrep.m_fnIsLocked );
return m_tWsrep.m_fnIsLocked ( &m_tWsrep, szName, pConn, pNode );
}
public:
using Status_e_ = Status_e;
inline static const char* szGetStatus ( Status_e eStatus ) noexcept
{
return GetStatus ( eStatus );
}
public:
// nothing should be used if LoadWsrep() returned false.
bool CheckLoadWsrep()
{
TlsMsg::ResetErr();
auto eLogLvl = LogLevel_e::ERROR_;
AT_SCOPE_EXIT ( [&eLogLvl] { if ( TlsMsg::HasErr() ) WsrepLog ( eLogLvl, TlsMsg::szError()); TlsMsg::ResetErr(); } );
m_tWsrep.m_pDlh = m_pWsrepLib->GetLib();
if ( !!strcmp ( INTERFACE_VERSION, m_tWsrep.m_szInterfaceVersion ) )
return TlsMsg::Err ( "wrong galera interface version. Need %s, got %s", INTERFACE_VERSION, m_tWsrep.m_szInterfaceVersion );
if ( ( m_tWsrep.m_fnInit || TlsMsg::Err ( "wrong Init" ) )
&& ( m_tWsrep.m_fnCapabilities || TlsMsg::Err ( "wrong Capabilities" ) )
&& ( m_tWsrep.m_fnOptionsSet || TlsMsg::Err ( "wrong OptionsSet" ) )
&& ( m_tWsrep.m_fnOptionsGet || TlsMsg::Err ( "wrong OptionsGet" ) )
&& ( m_tWsrep.m_fnConnect || TlsMsg::Err ( "wrong Connect" ) )
&& ( m_tWsrep.m_fnDisconnect || TlsMsg::Err ( "wrong Disconnect" ) )
&& ( m_tWsrep.m_fnRecv || TlsMsg::Err ( "wrong Recv" ) )
&& ( m_tWsrep.m_fnPreCommit || TlsMsg::Err ( "wrong PreCommit" ) )
&& ( m_tWsrep.m_fnPostCommit || TlsMsg::Err ( "wrong PostCommit" ) )
&& ( m_tWsrep.m_fnPostRollback || TlsMsg::Err ( "wrong PostRollback" ) )
&& ( m_tWsrep.m_fnReplayTrx || TlsMsg::Err ( "wrong ReplayTrx" ) )
&& ( m_tWsrep.m_fnAbortPreCommit || TlsMsg::Err ( "wrong AbortPreCommit" ) )
&& ( m_tWsrep.m_fnAppendKey || TlsMsg::Err ( "wrong AppendKey" ) )
&& ( m_tWsrep.m_fnAppendData || TlsMsg::Err ( "wrong AppendData" ) )
&& ( m_tWsrep.m_fnCasualRead || TlsMsg::Err ( "wrong CasualRead" ) )
&& ( m_tWsrep.m_fnFreeConnection || TlsMsg::Err ( "wrong FreeConnection" ) )
&& ( m_tWsrep.m_fnToExecuteStart || TlsMsg::Err ( "wrong ToExecuteStart" ) )
&& ( m_tWsrep.m_fnToExecuteEnd || TlsMsg::Err ( "wrong ToExecuteEnd" ) )
&& ( m_tWsrep.m_fnPreorderedCollect || TlsMsg::Err ( "wrong PreorderedCollect" ) )
&& ( m_tWsrep.m_fnPreorderedCommit || TlsMsg::Err ( "wrong PreorderedCommit" ) )
&& ( m_tWsrep.m_fnSstSent || TlsMsg::Err ( "wrong SstSent" ) )
&& ( m_tWsrep.m_fnSstReceived || TlsMsg::Err ( "wrong SstReceived" ) )
&& ( m_tWsrep.m_fnSnapshot || TlsMsg::Err ( "wrong Snapshot" ) )
&& ( m_tWsrep.m_fnStatsGet || TlsMsg::Err ( "wrong StatsGet" ) )
&& ( m_tWsrep.m_fnStatsFree || TlsMsg::Err ( "wrong StatsFree" ) )
&& ( m_tWsrep.m_fnStatsReset || TlsMsg::Err ( "wrong StatsReset" ) )
&& ( m_tWsrep.m_fnPause || TlsMsg::Err ( "wrong Pause" ) )
&& ( m_tWsrep.m_fnResume || TlsMsg::Err ( "wrong Resume" ) )
&& ( m_tWsrep.m_fnDesync || TlsMsg::Err ( "wrong Desync" ) )
&& ( m_tWsrep.m_fnResync || TlsMsg::Err ( "wrong Resync" ) )
&& ( m_tWsrep.m_fnLock || TlsMsg::Err ( "wrong Lock" ) )
&& ( m_tWsrep.m_fnUnlock || TlsMsg::Err ( "wrong Unlock" ) )
&& ( m_tWsrep.m_fnIsLocked || TlsMsg::Err ( "wrong IsLocked" ) ) )
{
eLogLvl = LogLevel_e::INFO;
TlsMsg::Err() << m_tWsrep.m_szName << " " << m_tWsrep.m_szVersion << " by " << m_tWsrep.m_szVendor << " loaded ok.";
return true;
}
return false;
}
protected:
~WrappedWsrep_t() final
{
if ( !m_tWsrep.m_fnFree )
return;
m_tWsrep.m_fnFree ( &m_tWsrep );
m_tWsrep.m_fnFree = nullptr;
}
};
class Provider_c final : public Provider_T<WrappedWsrep_t>
{
using BASE = Provider_T<WrappedWsrep_t>;
// callback for Galera commit that transaction received and parsed before should be committed or rolled back
static CbStatus_e Commit_fn ( void* pRecvCtx, DWORD uFlags, const Wsrep::TrxMeta_t* pMeta, bool* pExit, bool bCommit )
{
return BASE::Commit_fn ( pRecvCtx, nullptr, uFlags, pMeta, pExit, bCommit );
}
public:
using WSREPWRAP = WrappedWsrep_t;
Provider_c ( Wsrep::Cluster_i* pCluster, WSREPWRAP* pWsrep )
: BASE { pCluster, pWsrep }
{ }
bool Init ( CSphString sName, const char* szListenAddr, const char* szIncoming, const char* szPath, const char* szOptions )
{
if ( !m_pWsrep )
return false;
m_sName = std::move ( sName );
struct
{
void* m_pCtx;
const char* m_szName;
const char* m_szAddress;
const char* m_szIncoming;
const char* m_szPath;
const char* m_szOptions;
int m_iProtoVer;
const Wsrep::GlobalTid_t* m_pStateID;
const char* m_sState;
uint64_t m_uStateLen;
// callbacks
void ( *m_fnLoger ) ( LogLevel_e, const char* );
CbStatus_e ( *m_fnViewChanged ) ( void*, void*, const Wsrep::ViewInfo_t*, const char*, uint64_t, void**, uint64_t* );
CbStatus_e ( *m_fnApply ) ( void*, const void*, uint64_t, DWORD, const Wsrep::TrxMeta_t* );
CbStatus_e ( *m_fnCommit ) ( void*, /*const void*,*/ DWORD, const Wsrep::TrxMeta_t*, bool*, bool );
CbStatus_e ( *Unordered_fn ) ( void*, const void*, uint64_t );
CbStatus_e ( *SstDonate_fn ) ( void*, void*, const void*, uint64_t, const Wsrep::GlobalTid_t*, const char*, uint64_t, bool );
void ( *m_fnSynced ) ( void* );
} tArgs = {
m_pCluster, m_sName.cstr(), szListenAddr, szIncoming, szPath, szOptions, 127, &m_tStateID, "", 0, WsrepLog, ViewChanged_fn // app + recv
, Apply_fn // recv
, Commit_fn // recv
, Unordered_fn // recv
, SstDonate_fn // app + recv
, Synced_fn // app
};
auto eRes = m_pWsrep->Init ( &tArgs );
return eRes == Status_e::OK || TlsMsg::Err ( "replication init failed: %d '%s'", (int)eRes, GetStatus ( eRes ) );
}
Wsrep::Applier_i* GetApplier() final
{
return nullptr;
}
};
} // namespace RAW25
template<>
void Writeset_T<RAW25::WrappedWsrep_t>::InterimCommit()
{
m_eLastRes = Status_e::OK; // no 'interimcommit' function in v25
m_bNeedPostRollBack = false;
}
template<>
[[nodiscard]] bool Writeset_T<RAW25::WrappedWsrep_t>::Replicate()
{
m_eLastRes = Status_e::OK; // no 'replicate' function in v25
return true;
}
Wsrep::Provider_i* MakeProviderV25 ( WsrepLoader_t tLoader, Wsrep::Cluster_i* pCluster, CSphString sName, const char* szListenAddr, const char* szIncoming, const char* szPath, const char* szOptions )
{
return MakeProvider<RAW25::Provider_c>( std::move ( tLoader ), pCluster, std::move ( sName ), szListenAddr, szIncoming, szPath, szOptions );
}
| 17,285
|
C++
|
.cpp
| 437
| 36.98627
| 199
| 0.688611
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
17,020
|
grastate.cpp
|
manticoresoftware_manticoresearch/src/replication/grastate.cpp
|
//
// Copyright (c) 2019-2024, Manticore Software LTD (https://manticoresearch.com)
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org
//
#include "grastate.h"
#include "sphinxutils.h"
#include "fileutils.h"
#include "fileio.h"
#include <optional>
const char* dGaleraFiles[] = { "grastate.dat", "galera.cache" };
const char* szGrastatePath = "%s/grastate.dat";
const char* szGrastatePathNew = "%s/grastate.dat.new";
const char* szGrastatePathOld = "%s/grastate.dat.old";
const auto sSafePattern = FROMS ( "safe_to_bootstrap" );
const auto sSafeMsg = FROMS ( "safe_to_bootstrap: 1" );
static bool FilterGrastate ( const CSphString& sPath, std::function<bool ( Str_t )> fnGraFilter )
{
auto sClusterState = SphSprintf ( szGrastatePath, sPath.cstr() );
// cluster starts well without grastate.dat file
if ( !sphIsReadable ( sClusterState ) )
return true;
TLS_MSG_STRING ( sError );
CSphAutoreader tReader;
if ( !tReader.Open ( sClusterState, sError ) )
return false;
CSphFixedVector<char> dBuf { 2048 };
SphOffset_t iStateSize = tReader.GetFilesize();
while ( tReader.GetPos() < iStateSize )
{
auto iLineLen = tReader.GetLine ( dBuf.Begin(), (int)dBuf.GetLengthBytes() );
if ( !fnGraFilter ( { dBuf.Begin(), iLineLen } ) )
break;
}
return true;
}
// Checks <sPath>/grastate.dat before bootstrapping a new cluster.
// Returns true when the file has no 'safe_to_bootstrap' line (or no value after it)
// or when the flag equals 1; otherwise reports an error via TlsMsg telling the user
// to restart with '--new-cluster-force'.
bool CheckClusterNew ( const CSphString& sPath )
{
	std::optional<int> iVal;
	FilterGrastate ( sPath, [&iVal] ( Str_t sLine ) {
		auto dKeyVal = sphSplit ( sLine.first, sLine.second, ":" );
		if ( dKeyVal.GetLength() < 2 )
			return true;
		dKeyVal.Apply ( [] ( CSphString& sVal ) { sVal.Trim(); } );
		if ( dKeyVal[0] != sSafePattern )
			return true;
		// FIX: a well-formed 'safe_to_bootstrap: 1' line splits into exactly 2 tokens,
		// and the guard above only guarantees GetLength() >= 2; the previous
		// 'GetLength() > 2' assertion fired on every valid file in debug builds.
		assert ( dKeyVal.GetLength() >= 2 );
		iVal = atoi ( dKeyVal[1].cstr() );
		return false; // flag found - stop scanning
	} );
	return !iVal.has_value() // can start cluster without any safe_to_bootstrap flag
		|| iVal == 1
		|| TlsMsg::Err ( "can not start cluster without 'safe_to_bootstrap: 1' at the '%s/grastate.dat', got '%d', use '--new-cluster-force' to bootstrap this node and bypassing cluster restart protection", sPath.cstr(), iVal.value_or(0) );
}
// set safe_to_bootstrap: 1 at cluster/grastate.dat for Galera to start properly
// The rewrite is crash-safe: a patched copy is first written to grastate.dat.new,
// then swapped in via two renames (the previous file survives as grastate.dat.old
// until both renames succeed), so an interruption never destroys the state file.
bool NewClusterForce ( const CSphString& sPath )
{
auto sClusterState = SphSprintf ( szGrastatePath, sPath.cstr() );
auto sNewState = SphSprintf ( szGrastatePathNew, sPath.cstr() );
auto sOldState = SphSprintf ( szGrastatePathOld, sPath.cstr() );
TLS_MSG_STRING ( sError );
CSphWriter tWriter;
if ( !tWriter.OpenFile ( sNewState, sError ) )
return false;
// NOTE: the scope-exit lambda captures sLine by reference, so reassigning sLine
// inside the filter body substitutes the text written out for the current line.
FilterGrastate ( sPath, [&tWriter] ( Str_t sLine ) {
AT_SCOPE_EXIT ( [&]() { tWriter.PutBytes ( sLine.first, sLine.second ); tWriter.PutByte ( '\n' ); } );
auto dKeyVal = sphSplit ( sLine.first, sLine.second, ":" );
if ( dKeyVal.GetLength() < 2 )
return true;
dKeyVal.Apply ( [] ( CSphString& sVal ) { sVal.Trim(); } );
// replace whatever value the flag carried with the canonical 'safe_to_bootstrap: 1'
if ( dKeyVal[0] == sSafePattern )
sLine = sSafeMsg;
return true; // keep scanning - every line must be copied to the new file
} );
if ( TlsMsg::HasErr() || tWriter.IsError() )
return false;
tWriter.CloseFile();
// swap the patched file in; the old state is kept until both renames succeed
if ( sph::rename ( sClusterState.cstr(), sOldState.cstr() ) != 0 )
return TlsMsg::Err ( "failed to rename %s to %s", sClusterState.cstr(), sOldState.cstr() );
if ( sph::rename ( sNewState.cstr(), sClusterState.cstr() ) != 0 )
return TlsMsg::Err ( "failed to rename %s to %s", sNewState.cstr(), sClusterState.cstr() );
::unlink ( sOldState.cstr() );
return true;
}
// clean up Galera files at cluster path to start new and fresh cluster again
void CleanClusterFiles ( const CSphString& sPath )
{
	for ( const char* szName : dGaleraFiles )
	{
		auto sFullPath = SphSprintf ( "%s/%s", sPath.cstr(), szName );
		::unlink ( sFullPath.cstr() );
	}
}
| 3,873
|
C++
|
.cpp
| 94
| 38.904255
| 234
| 0.698214
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.