text
stringlengths
5
1.04M
/* The copyright in this software is being made available under the BSD * License, included below. This software may be subject to other third party * and contributor rights, including patent rights, and no such rights are * granted under this license. * * Copyright (c) 2010-2020, ITU/ISO/IEC * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * Neither the name of the ITU/ISO/IEC nor the names of its contributors may * be used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
*/ /** \file LoopFilter.cpp \brief deblocking filter */ #include "LoopFilter.h" #include "Slice.h" #include "Mv.h" #include "Unit.h" #include "UnitTools.h" #include "UnitPartitioner.h" #include "dtrace_codingstruct.h" #include "dtrace_buffer.h" //! \ingroup CommonLib //! \{ // ==================================================================================================================== // Constants // ==================================================================================================================== //#define EDGE_VER 0 //#define EDGE_HOR 1 #define DEBLOCK_SMALLEST_BLOCK 8 #define DEFAULT_INTRA_TC_OFFSET 2 ///< Default intra TC offset // ==================================================================================================================== // Tables // ==================================================================================================================== const uint16_t LoopFilter::sm_tcTable[MAX_QP + 1 + DEFAULT_INTRA_TC_OFFSET] = { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,4,4,4,4,5,5,5,5,7,7,8,9,10,10,11,13,14,15,17,19,21,24,25,29,33,36,41,45,51,57,64,71,80,89,100,112,125,141,157,177,198,222,250,280,314,352,395 }; const uint8_t LoopFilter::sm_betaTable[MAX_QP + 1] = { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,6,7,8,9,10,11,12,13,14,15,16,17,18,20,22,24,26,28,30,32,34,36,38,40,42,44,46,48,50,52,54,56,58,60,62,64 , 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 86, 88 }; inline static uint32_t getRasterIdx(const Position& pos, const PreCalcValues& pcv) { return ( ( pos.x & pcv.maxCUWidthMask ) >> pcv.minCUWidthLog2 ) + ( ( pos.y & pcv.maxCUHeightMask ) >> pcv.minCUHeightLog2 ) * pcv.partsInCtuWidth; } // ==================================================================================================================== // utility functions // ==================================================================================================================== #if JVET_O1143_LPF_ACROSS_SUBPIC_BOUNDARY static bool isAvailableLeft( const 
CodingUnit& cu, const CodingUnit& cu2, const bool bEnforceSliceRestriction, const bool bEnforceTileRestriction, const bool bEnforceSubPicRestriction) #else static bool isAvailableLeft( const CodingUnit& cu, const CodingUnit& cu2, const bool bEnforceSliceRestriction, const bool bEnforceTileRestriction) #endif { #if JVET_O1143_LPF_ACROSS_SUBPIC_BOUNDARY return ((!bEnforceSliceRestriction || CU::isSameSlice(cu, cu2)) && (!bEnforceTileRestriction || CU::isSameTile(cu, cu2)) && (!bEnforceSubPicRestriction || CU::isSameSubPic(cu, cu2))); #else return ((!bEnforceSliceRestriction || CU::isSameSlice(cu, cu2)) && (!bEnforceTileRestriction || CU::isSameTile(cu, cu2))); #endif } #if JVET_O1143_LPF_ACROSS_SUBPIC_BOUNDARY static bool isAvailableAbove( const CodingUnit& cu, const CodingUnit& cu2, const bool bEnforceSliceRestriction, const bool bEnforceTileRestriction, const bool bEnforceSubPicRestriction) #else static bool isAvailableAbove( const CodingUnit& cu, const CodingUnit& cu2, const bool bEnforceSliceRestriction, const bool bEnforceTileRestriction) #endif { #if JVET_O1143_LPF_ACROSS_SUBPIC_BOUNDARY return ( !bEnforceSliceRestriction || CU::isSameSlice( cu, cu2 ) ) && ( !bEnforceTileRestriction || CU::isSameTile( cu, cu2 ) ) && (!bEnforceSubPicRestriction || CU::isSameSubPic(cu, cu2)); #else return ( !bEnforceSliceRestriction || CU::isSameSlice( cu, cu2 ) ) && ( !bEnforceTileRestriction || CU::isSameTile( cu, cu2 ) ) ; #endif } // ==================================================================================================================== // Constructor / destructor / create / destroy // ==================================================================================================================== LoopFilter::LoopFilter() { } LoopFilter::~LoopFilter() { } // ==================================================================================================================== // Public member functions // 
==================================================================================================================== void LoopFilter::create( const unsigned uiMaxCUDepth ) { destroy(); const unsigned numPartitions = 1 << ( uiMaxCUDepth << 1 ); for( int edgeDir = 0; edgeDir < NUM_EDGE_DIR; edgeDir++ ) { m_aapucBS [edgeDir].resize( numPartitions ); m_aapbEdgeFilter[edgeDir].resize( numPartitions ); } m_enc = false; } void LoopFilter::initEncPicYuvBuffer(ChromaFormat chromaFormat, int lumaWidth, int lumaHeight) { const UnitArea picArea(chromaFormat, Area(0, 0, lumaWidth, lumaHeight)); m_encPicYuvBuffer.destroy(); m_encPicYuvBuffer.create(picArea); } void LoopFilter::destroy() { for( int edgeDir = 0; edgeDir < NUM_EDGE_DIR; edgeDir++ ) { m_aapucBS [edgeDir].clear(); m_aapbEdgeFilter[edgeDir].clear(); } m_encPicYuvBuffer.destroy(); } /** - call deblocking function for every CU . \param pcPic picture class (Pic) pointer */ void LoopFilter::loopFilterPic( CodingStructure& cs ) { const PreCalcValues& pcv = *cs.pcv; m_shiftHor = ::getComponentScaleX( COMPONENT_Cb, cs.pcv->chrFormat ); m_shiftVer = ::getComponentScaleY( COMPONENT_Cb, cs.pcv->chrFormat ); DTRACE_UPDATE( g_trace_ctx, ( std::make_pair( "poc", cs.slice->getPOC() ) ) ); #if ENABLE_TRACING for( int y = 0; y < pcv.heightInCtus; y++ ) { for( int x = 0; x < pcv.widthInCtus; x++ ) { const UnitArea ctuArea( pcv.chrFormat, Area( x << pcv.maxCUWidthLog2, y << pcv.maxCUHeightLog2, pcv.maxCUWidth, pcv.maxCUWidth ) ); DTRACE ( g_trace_ctx, D_CRC, "CTU %d %d", ctuArea.Y().x, ctuArea.Y().y ); DTRACE_CRC( g_trace_ctx, D_CRC, cs, cs.picture->getRecoBuf( clipArea( ctuArea, *cs.picture ) ), &ctuArea.Y() ); } } #endif for( int y = 0; y < pcv.heightInCtus; y++ ) { for( int x = 0; x < pcv.widthInCtus; x++ ) { memset( m_aapucBS [EDGE_VER].data(), 0, m_aapucBS [EDGE_VER].byte_size() ); memset( m_aapbEdgeFilter[EDGE_VER].data(), false, m_aapbEdgeFilter[EDGE_VER].byte_size() ); memset( m_maxFilterLengthP, 0, sizeof(m_maxFilterLengthP) 
); memset( m_maxFilterLengthQ, 0, sizeof(m_maxFilterLengthQ) ); memset( m_transformEdge, false, sizeof(m_transformEdge) ); m_ctuXLumaSamples = x << pcv.maxCUWidthLog2; m_ctuYLumaSamples = y << pcv.maxCUHeightLog2; const UnitArea ctuArea( pcv.chrFormat, Area( x << pcv.maxCUWidthLog2, y << pcv.maxCUHeightLog2, pcv.maxCUWidth, pcv.maxCUWidth ) ); CodingUnit* firstCU = cs.getCU( ctuArea.lumaPos(), CH_L); cs.slice = firstCU->slice; // CU-based deblocking for( auto &currCU : cs.traverseCUs( CS::getArea( cs, ctuArea, CH_L ), CH_L ) ) { xDeblockCU( currCU, EDGE_VER ); } if( CS::isDualITree( cs ) ) { memset( m_aapucBS [EDGE_VER].data(), 0, m_aapucBS [EDGE_VER].byte_size() ); memset( m_aapbEdgeFilter[EDGE_VER].data(), false, m_aapbEdgeFilter[EDGE_VER].byte_size() ); memset( m_maxFilterLengthP, 0, sizeof(m_maxFilterLengthP) ); memset( m_maxFilterLengthQ, 0, sizeof(m_maxFilterLengthQ) ); memset( m_transformEdge, false, sizeof(m_transformEdge) ); for( auto &currCU : cs.traverseCUs( CS::getArea( cs, ctuArea, CH_C ), CH_C ) ) { xDeblockCU( currCU, EDGE_VER ); } } } } // Vertical filtering for( int y = 0; y < pcv.heightInCtus; y++ ) { for( int x = 0; x < pcv.widthInCtus; x++ ) { memset( m_aapucBS [EDGE_HOR].data(), 0, m_aapucBS [EDGE_HOR].byte_size() ); memset( m_aapbEdgeFilter[EDGE_HOR].data(), false, m_aapbEdgeFilter[EDGE_HOR].byte_size() ); memset( m_maxFilterLengthP, 0, sizeof(m_maxFilterLengthP) ); memset( m_maxFilterLengthQ, 0, sizeof(m_maxFilterLengthQ) ); memset( m_transformEdge, false, sizeof(m_transformEdge) ); m_ctuXLumaSamples = x << pcv.maxCUWidthLog2; m_ctuYLumaSamples = y << pcv.maxCUHeightLog2; const UnitArea ctuArea( pcv.chrFormat, Area( x << pcv.maxCUWidthLog2, y << pcv.maxCUHeightLog2, pcv.maxCUWidth, pcv.maxCUWidth ) ); CodingUnit* firstCU = cs.getCU( ctuArea.lumaPos(), CH_L); cs.slice = firstCU->slice; // CU-based deblocking for( auto &currCU : cs.traverseCUs( CS::getArea( cs, ctuArea, CH_L ), CH_L ) ) { xDeblockCU( currCU, EDGE_HOR ); } if( CS::isDualITree( 
cs ) ) { memset( m_aapucBS [EDGE_HOR].data(), 0, m_aapucBS [EDGE_HOR].byte_size() ); memset( m_aapbEdgeFilter[EDGE_HOR].data(), false, m_aapbEdgeFilter[EDGE_HOR].byte_size() ); memset( m_maxFilterLengthP, 0, sizeof(m_maxFilterLengthP) ); memset( m_maxFilterLengthQ, 0, sizeof(m_maxFilterLengthQ) ); memset( m_transformEdge, false, sizeof(m_transformEdge) ); for( auto &currCU : cs.traverseCUs( CS::getArea( cs, ctuArea, CH_C ), CH_C ) ) { xDeblockCU( currCU, EDGE_HOR ); } } } } DTRACE_PIC_COMP(D_REC_CB_LUMA_LF, cs, cs.getRecoBuf(), COMPONENT_Y); DTRACE_PIC_COMP(D_REC_CB_CHROMA_LF, cs, cs.getRecoBuf(), COMPONENT_Cb); DTRACE_PIC_COMP(D_REC_CB_CHROMA_LF, cs, cs.getRecoBuf(), COMPONENT_Cr); DTRACE ( g_trace_ctx, D_CRC, "LoopFilter" ); DTRACE_CRC( g_trace_ctx, D_CRC, cs, cs.getRecoBuf() ); } void LoopFilter::resetFilterLengths() { memset(m_aapucBS[EDGE_VER].data(), 0, m_aapucBS[EDGE_VER].byte_size()); memset(m_aapbEdgeFilter[EDGE_VER].data(), false, m_aapbEdgeFilter[EDGE_VER].byte_size()); memset(m_aapucBS[EDGE_HOR].data(), 0, m_aapucBS[EDGE_HOR].byte_size()); memset(m_aapbEdgeFilter[EDGE_HOR].data(), false, m_aapbEdgeFilter[EDGE_HOR].byte_size()); memset(m_maxFilterLengthP, 0, sizeof(m_maxFilterLengthP)); memset(m_maxFilterLengthQ, 0, sizeof(m_maxFilterLengthQ)); memset(m_transformEdge, false, sizeof(m_transformEdge)); } // ==================================================================================================================== // Protected member functions // ==================================================================================================================== /** Deblocking filter process in CU-based (the same function as conventional's) \param cu the CU to be deblocked \param edgeDir the direction of the edge in block boundary (horizontal/vertical), which is added newly */ void LoopFilter::xDeblockCU( CodingUnit& cu, const DeblockEdgeDir edgeDir ) { const PreCalcValues& pcv = *cu.cs->pcv; const Area area = cu.Y().valid() ? 
cu.Y() : Area( recalcPosition( cu.chromaFormat, cu.chType, CHANNEL_TYPE_LUMA, cu.blocks[cu.chType].pos() ), recalcSize( cu.chromaFormat, cu.chType, CHANNEL_TYPE_LUMA, cu.blocks[cu.chType].size() ) ); bool horEdgeFilter = false, verEdgeFilter = false; int numHorVirBndry = 0, numVerVirBndry = 0; int horVirBndryPos[] = { 0, 0, 0 }; int verVirBndryPos[] = { 0, 0, 0 }; bool isCuCrossedByVirtualBoundaries = isCrossedByVirtualBoundaries( area.x, area.y, area.width, area.height, numHorVirBndry, numVerVirBndry, horVirBndryPos, verVirBndryPos, cu.cs->picHeader ); xSetLoopfilterParam( cu ); static_vector<int, 2*MAX_CU_SIZE> edgeIdx; edgeIdx.clear(); if (m_enc) { m_shiftHor = ::getComponentScaleX(COMPONENT_Cb, cu.chromaFormat); m_shiftVer = ::getComponentScaleY(COMPONENT_Cb, cu.chromaFormat); int x, y; if (cu.Y().valid()) { x = cu.block(COMPONENT_Y).x; y = cu.block(COMPONENT_Y).y; } else { x = cu.block(COMPONENT_Cb).x << m_shiftHor; y = cu.block(COMPONENT_Cb).y << m_shiftVer; } m_ctuXLumaSamples = x & ~(cu.slice->getSPS()->getMaxCUWidth() - 1); m_ctuYLumaSamples = y & ~(cu.slice->getSPS()->getMaxCUHeight() - 1); } for( auto &currTU : CU::traverseTUs( cu ) ) { const Area& areaTu = cu.Y().valid() ? 
currTU.block( COMPONENT_Y ) : Area( recalcPosition( cu.chromaFormat, cu.chType, CHANNEL_TYPE_LUMA, currTU.blocks[cu.chType].pos() ), recalcSize( cu.chromaFormat, cu.chType, CHANNEL_TYPE_LUMA, currTU.blocks[cu.chType].size() ) ); verEdgeFilter = m_stLFCUParam.internalEdge; horEdgeFilter = m_stLFCUParam.internalEdge; if( edgeDir == EDGE_HOR && ((areaTu.y % 4) != 0) ) { continue; } if( edgeDir == EDGE_VER && ((areaTu.x % 4) != 0) ) { continue; } if( isCuCrossedByVirtualBoundaries ) { xDeriveEdgefilterParam( areaTu.x, areaTu.y, numVerVirBndry, numHorVirBndry, verVirBndryPos, horVirBndryPos, verEdgeFilter, horEdgeFilter ); } xSetEdgefilterMultiple( cu, EDGE_VER, areaTu, verEdgeFilter ); xSetEdgefilterMultiple( cu, EDGE_HOR, areaTu, horEdgeFilter ); xSetMaxFilterLengthPQFromTransformSizes( edgeDir, cu, currTU ); if( cu.Y().valid() ) { edgeIdx.push_back( ( edgeDir == EDGE_HOR ) ? ( currTU.blocks[cu.chType].y - cu.blocks[cu.chType].y ) / 4 : ( currTU.blocks[cu.chType].x - cu.blocks[cu.chType].x ) / 4 ); } else { edgeIdx.push_back( ( edgeDir == EDGE_HOR ) ? (( currTU.blocks[cu.chType].y - cu.blocks[cu.chType].y ) << ::getComponentScaleY(COMPONENT_Cb, cu.chromaFormat)) / 4 : (( currTU.blocks[cu.chType].x - cu.blocks[cu.chType].x ) << ::getComponentScaleX(COMPONENT_Cb, cu.chromaFormat)) / 4 ); } } bool mvSubBlocks = false; int subBlockSize = 8; for( auto &currPU : CU::traversePUs( cu ) ) { const Area& areaPu = cu.Y().valid() ? currPU.block( COMPONENT_Y ) : area; const bool xOff = currPU.blocks[cu.chType].x != cu.blocks[cu.chType].x; const bool yOff = currPU.blocks[cu.chType].y != cu.blocks[cu.chType].y; verEdgeFilter = (xOff ? m_stLFCUParam.internalEdge : m_stLFCUParam.leftEdge); horEdgeFilter = (yOff ? 
m_stLFCUParam.internalEdge : m_stLFCUParam.topEdge); if( isCuCrossedByVirtualBoundaries ) { xDeriveEdgefilterParam( areaPu.x, areaPu.y, numVerVirBndry, numHorVirBndry, verVirBndryPos, horVirBndryPos, verEdgeFilter, horEdgeFilter ); } xSetEdgefilterMultiple( cu, EDGE_VER, areaPu, verEdgeFilter, xOff ); xSetEdgefilterMultiple( cu, EDGE_HOR, areaPu, horEdgeFilter, yOff ); edgeIdx.push_back( ( edgeDir == EDGE_HOR ) ? ( currPU.blocks[cu.chType].y - cu.blocks[cu.chType].y ) / 4 : ( currPU.blocks[cu.chType].x - cu.blocks[cu.chType].x ) / 4 ); if ((currPU.mergeFlag && (currPU.mergeType == MRG_TYPE_SUBPU_ATMVP)) || cu.affine) { mvSubBlocks = true; if (edgeDir == EDGE_HOR) { for (uint32_t off = subBlockSize; off < areaPu.height; off += subBlockSize) { const Area mvBlockH(cu.Y().x, cu.Y().y + off, cu.Y().width, pcv.minCUHeight); horEdgeFilter = m_stLFCUParam.internalEdge; if( isCuCrossedByVirtualBoundaries ) { xDeriveEdgefilterParam( mvBlockH.x, mvBlockH.y, 0, numHorVirBndry, verVirBndryPos, horVirBndryPos, verEdgeFilter, horEdgeFilter ); } xSetEdgefilterMultiple(cu, EDGE_HOR, mvBlockH, horEdgeFilter, 1); edgeIdx.push_back( ( currPU.blocks[cu.chType].y + off - cu.blocks[cu.chType].y ) / 4 ); } } else { for (uint32_t off = subBlockSize; off < areaPu.width; off += subBlockSize) { const Area mvBlockV(cu.Y().x + off, cu.Y().y, pcv.minCUWidth, cu.Y().height); verEdgeFilter = m_stLFCUParam.internalEdge; if( isCuCrossedByVirtualBoundaries ) { xDeriveEdgefilterParam( mvBlockV.x, mvBlockV.y, numVerVirBndry, 0, verVirBndryPos, horVirBndryPos, verEdgeFilter, horEdgeFilter ); } xSetEdgefilterMultiple(cu, EDGE_VER, mvBlockV, verEdgeFilter, 1); edgeIdx.push_back( ( currPU.blocks[cu.chType].x + off - cu.blocks[cu.chType].x ) / 4 ); } } } xSetMaxFilterLengthPQForCodingSubBlocks( edgeDir, cu, currPU, mvSubBlocks, subBlockSize, areaPu ); } const unsigned uiPelsInPart = pcv.minCUWidth; for( int y = 0; y < area.height; y += uiPelsInPart ) { for( int x = 0; x < area.width; x += uiPelsInPart ) { 
unsigned uiBSCheck = 1; const Position localPos { area.x + x, area.y + y }; const unsigned rasterIdx = getRasterIdx( localPos, pcv ); if( m_aapbEdgeFilter[edgeDir][rasterIdx] && uiBSCheck ) { char bS = 0; if(cu.treeType != TREE_C) { bS |= xGetBoundaryStrengthSingle( cu, edgeDir, localPos, CHANNEL_TYPE_LUMA ); } if(cu.treeType != TREE_L && cu.chromaFormat != CHROMA_400) { bS |= xGetBoundaryStrengthSingle( cu, edgeDir, localPos, CHANNEL_TYPE_CHROMA ); } m_aapucBS[edgeDir][rasterIdx] = bS; } } } std::sort( edgeIdx.begin(), edgeIdx.end() ); int prevEdgeIdx = -1; for ( const int& edge : edgeIdx ) { if ( edge == prevEdgeIdx ) // skip duplicate edgeIdx marked by both transform and coding subblock processes { continue; } prevEdgeIdx = edge; if ( cu.blocks[COMPONENT_Y].valid() ) { xEdgeFilterLuma( cu, edgeDir, edge ); } #if JVET_Q0438_MONOCHROME_BUGFIXES if ( pcv.chrFormat != CHROMA_400 && cu.blocks[COMPONENT_Cb].valid() ) #else if ( cu.blocks[COMPONENT_Cb].valid() && pcv.chrFormat != CHROMA_400 ) #endif { if ( !cu.ispMode || edge == 0 ) { xEdgeFilterChroma( cu, edgeDir, edge ); } } } } inline bool LoopFilter::isCrossedByVirtualBoundaries(const int xPos, const int yPos, const int width, const int height, int& numHorVirBndry, int& numVerVirBndry, int horVirBndryPos[], int verVirBndryPos[], const PicHeader* picHeader ) { numHorVirBndry = 0; numVerVirBndry = 0; if (picHeader->getLoopFilterAcrossVirtualBoundariesDisabledFlag()) { for (int i = 0; i < picHeader->getNumHorVirtualBoundaries(); i++) { if (yPos <= picHeader->getVirtualBoundariesPosY(i) && picHeader->getVirtualBoundariesPosY(i) < yPos + height) { horVirBndryPos[numHorVirBndry++] = picHeader->getVirtualBoundariesPosY(i); } } for (int i = 0; i < picHeader->getNumVerVirtualBoundaries(); i++) { if (xPos <= picHeader->getVirtualBoundariesPosX(i) && picHeader->getVirtualBoundariesPosX(i) < xPos + width) { verVirBndryPos[numVerVirBndry++] = picHeader->getVirtualBoundariesPosX(i); } } } return numHorVirBndry > 0 || 
numVerVirBndry > 0; } inline void LoopFilter::xDeriveEdgefilterParam( const int xPos, const int yPos, const int numVerVirBndry, const int numHorVirBndry, const int verVirBndryPos[], const int horVirBndryPos[], bool &verEdgeFilter, bool &horEdgeFilter ) { for (int i = 0; i < numVerVirBndry; i++) { if (verVirBndryPos[i] == xPos) { verEdgeFilter = false; break; } } for (int i = 0; i < numHorVirBndry; i++) { if (horVirBndryPos[i] == yPos) { horEdgeFilter = false; break; } } } void LoopFilter::xSetMaxFilterLengthPQFromTransformSizes( const DeblockEdgeDir edgeDir, const CodingUnit& cu, const TransformUnit& currTU ) { const TransformUnit& tuQ = currTU; if ( edgeDir == EDGE_HOR ) { #if JVET_Q0438_MONOCHROME_BUGFIXES for ( int cIdx = 0; cIdx < ::getNumberValidComponents(tuQ.chromaFormat); cIdx++ ) // per component #else for ( int cIdx = 0; cIdx < MAX_NUM_COMPONENT; cIdx++ ) // per component #endif { const ComponentID comp = ComponentID(cIdx); const ChannelType ch = toChannelType(comp); const int shiftHor = ( ( ch == CH_L ) ? 0 : m_shiftHor ); const int shiftVer = ( ( ch == CH_L ) ? 0 : m_shiftVer ); const int ctuXOff = currTU.block(comp).x - ( m_ctuXLumaSamples >> shiftHor ); // x offset from left edge of CTU in respective channel sample units const int ctuYOff = currTU.block(comp).y - ( m_ctuYLumaSamples >> shiftVer ); // y offset from top edge of CTU in respective channel sample units const int minCUWidth = cu.cs->pcv->minCUWidth >> shiftHor; if ( currTU.block(comp).valid() && ( ( currTU.block(comp).y == cu.block(comp).y ) ? 
m_stLFCUParam.topEdge : m_stLFCUParam.internalEdge ) ) // Edge deblocking needs to be recomputed since ISP contains whole CU chroma transforms in last TU of the CU { for ( int x = 0; x < currTU.blocks[cIdx].width; x += minCUWidth ) { const Position posQ = Position( currTU.blocks[ch].x + x, currTU.blocks[ch].y ); const Position posP = posQ.offset( 0, -1 ); const int sizeQSide = tuQ.block(comp).height; const TransformUnit& tuP = *cu.cs->getTU( posP, ch ); const int sizePSide = tuP.block(comp).height; m_transformEdge[cIdx][ctuXOff+x][ctuYOff] = true; if ( comp == COMPONENT_Y ) { bool smallBlock = (sizePSide <= 4) || (sizeQSide <= 4); if (smallBlock) { m_maxFilterLengthQ[cIdx][ctuXOff + x][ctuYOff] = 1; m_maxFilterLengthP[cIdx][ctuXOff + x][ctuYOff] = 1; } else { m_maxFilterLengthQ[cIdx][ctuXOff + x][ctuYOff] = (sizeQSide >= 32) ? 7 : 3; m_maxFilterLengthP[cIdx][ctuXOff + x][ctuYOff] = (sizePSide >= 32) ? 7 : 3; } } else { m_maxFilterLengthQ[cIdx][ctuXOff+x][ctuYOff] = ( sizeQSide >= 8 && sizePSide >= 8 ) ? 3 : 1; m_maxFilterLengthP[cIdx][ctuXOff+x][ctuYOff] = ( sizeQSide >= 8 && sizePSide >= 8 ) ? 3 : 1; } } } } } if ( edgeDir == EDGE_VER ) { #if JVET_Q0438_MONOCHROME_BUGFIXES for ( int cIdx = 0; cIdx < ::getNumberValidComponents(tuQ.chromaFormat); cIdx++ ) // per component #else for ( int cIdx = 0; cIdx < MAX_NUM_COMPONENT; cIdx++ ) // per component #endif { const ComponentID comp = ComponentID(cIdx); const ChannelType ch = toChannelType(comp); const int shiftHor = ( ( ch == CH_L ) ? 0 : m_shiftHor ); const int shiftVer = ( ( ch == CH_L ) ? 
0 : m_shiftVer ); const int ctuXOff = currTU.block(comp).x - ( m_ctuXLumaSamples >> shiftHor ); // x offset from left edge of CTU in respective channel sample units const int ctuYOff = currTU.block(comp).y - ( m_ctuYLumaSamples >> shiftVer ); // y offset from top edge of CTU in respective channel sample units const int minCUHeight = cu.cs->pcv->minCUHeight >> shiftVer; if ( currTU.block(comp).valid() && ( ( currTU.block(comp).x == cu.block(comp).x ) ? m_stLFCUParam.leftEdge : m_stLFCUParam.internalEdge ) ) // Edge deblocking needs to be recomputed since ISP contains whole CU chroma transforms in last TU of the CU { for ( int y = 0; y < currTU.blocks[cIdx].height; y += minCUHeight ) { const Position posQ = Position( currTU.blocks[ch].x, currTU.blocks[ch].y + y ); const Position posP = posQ.offset( -1, 0 ); const int sizeQSide = tuQ.block(comp).width; const TransformUnit& tuP = *cu.cs->getTU( posP, ch ); const int sizePSide = tuP.block(comp).width; m_transformEdge[cIdx][ctuXOff][ctuYOff+y] = true; if ( comp == COMPONENT_Y ) { bool smallBlock = (sizePSide <= 4) || (sizeQSide <= 4); if (smallBlock) { m_maxFilterLengthQ[cIdx][ctuXOff][ctuYOff + y] = 1; m_maxFilterLengthP[cIdx][ctuXOff][ctuYOff + y] = 1; } else { m_maxFilterLengthQ[cIdx][ctuXOff][ctuYOff + y] = (sizeQSide >= 32) ? 7 : 3; m_maxFilterLengthP[cIdx][ctuXOff][ctuYOff + y] = (sizePSide >= 32) ? 7 : 3; } } else { m_maxFilterLengthQ[cIdx][ctuXOff][ctuYOff+y] = ( sizeQSide >= 8 && sizePSide >= 8 ) ? 3 : 1; m_maxFilterLengthP[cIdx][ctuXOff][ctuYOff+y] = ( sizeQSide >= 8 && sizePSide >= 8 ) ? 
3 : 1; } } } } } } void LoopFilter::xSetMaxFilterLengthPQForCodingSubBlocks( const DeblockEdgeDir edgeDir, const CodingUnit& cu, const PredictionUnit& currPU, const bool& mvSubBlocks, const int& subBlockSize, const Area& areaPu ) { if ( mvSubBlocks && currPU.Y().valid() ) { const int cIdx = 0; const ComponentID comp = ComponentID(cIdx); const int ctuYOff = currPU.block(comp).y - m_ctuYLumaSamples; // y offset from top edge of CTU in luma samples const int ctuXOff = currPU.block(comp).x - m_ctuXLumaSamples; // x offset from left edge of CTU in luma samples const int minCUWidth = cu.cs->pcv->minCUWidth; const int minCUHeight = cu.cs->pcv->minCUHeight; if ( edgeDir == EDGE_HOR ) { for ( int y = 0; y < areaPu.height; y += subBlockSize ) { for ( int x = 0; x < areaPu.width; x += minCUWidth ) { if ( m_transformEdge[cIdx][ctuXOff+x][ctuYOff+y] ) { m_maxFilterLengthQ[cIdx][ctuXOff+x][ctuYOff+y] = std::min<int>(m_maxFilterLengthQ[cIdx][ctuXOff+x][ctuYOff+y], 5); if ( y > 0 ) { m_maxFilterLengthP[cIdx][ctuXOff+x][ctuYOff+y] = std::min<int>(m_maxFilterLengthP[cIdx][ctuXOff+x][ctuYOff+y], 5); } } else if (y > 0 && (m_transformEdge[cIdx][ctuXOff + x][ctuYOff + y - 4] || ((y + 4) >= areaPu.height) || m_transformEdge[cIdx][ctuXOff + x][ctuYOff + y + 4])) // adjacent to transform edge +/- 4 { m_maxFilterLengthQ[cIdx][ctuXOff + x][ctuYOff + y] = 1; m_maxFilterLengthP[cIdx][ctuXOff + x][ctuYOff + y] = 1; } else if (y > 0 && ( m_transformEdge[cIdx][ctuXOff+x][ctuYOff+y-8] || (( y + 8 ) >= areaPu.height) || m_transformEdge[cIdx][ctuXOff+x][ctuYOff+y+8] )) // adjacent to transform edge on 8x8 grid { m_maxFilterLengthQ[cIdx][ctuXOff+x][ctuYOff+y] = 2; m_maxFilterLengthP[cIdx][ctuXOff+x][ctuYOff+y] = 2; } else { m_maxFilterLengthQ[cIdx][ctuXOff+x][ctuYOff+y] = 3; m_maxFilterLengthP[cIdx][ctuXOff+x][ctuYOff+y] = 3; } } } } else // edgeDir == EDGE_VER { for ( int x = 0; x < areaPu.width; x += subBlockSize ) { for ( int y = 0; y < areaPu.height; y += minCUHeight ) { if ( 
m_transformEdge[cIdx][ctuXOff+x][ctuYOff+y] ) { m_maxFilterLengthQ[cIdx][ctuXOff+x][ctuYOff+y] = std::min<int>(m_maxFilterLengthQ[cIdx][ctuXOff+x][ctuYOff+y], 5); if ( x > 0 ) { m_maxFilterLengthP[cIdx][ctuXOff+x][ctuYOff+y] = std::min<int>(m_maxFilterLengthP[cIdx][ctuXOff+x][ctuYOff+y], 5); } } else if (x > 0 && (m_transformEdge[cIdx][ctuXOff + x - 4][ctuYOff + y] || ((x + 4) >= areaPu.width) || m_transformEdge[cIdx][ctuXOff + x + 4][ctuYOff + y])) // adjacent to transform edge +/- 4 { m_maxFilterLengthQ[cIdx][ctuXOff + x][ctuYOff + y] = 1; m_maxFilterLengthP[cIdx][ctuXOff + x][ctuYOff + y] = 1; } else if ( x > 0 && ( m_transformEdge[cIdx][ctuXOff+x-8][ctuYOff+y] || ( (x + 8) >= areaPu.width ) || m_transformEdge[cIdx][ctuXOff+x+8][ctuYOff+y] ) ) // adjacent to transform edge on 8x8 grid { m_maxFilterLengthQ[cIdx][ctuXOff+x][ctuYOff+y] = 2; m_maxFilterLengthP[cIdx][ctuXOff+x][ctuYOff+y] = 2; } else { m_maxFilterLengthQ[cIdx][ctuXOff+x][ctuYOff+y] = 3; m_maxFilterLengthP[cIdx][ctuXOff+x][ctuYOff+y] = 3; } } } } } } void LoopFilter::xSetEdgefilterMultiple( const CodingUnit& cu, const DeblockEdgeDir edgeDir, const Area& area, const bool bValue, const bool EdgeIdx ) { const PreCalcValues& pcv = *cu.cs->pcv; const unsigned uiAdd = ( edgeDir == EDGE_VER ) ? pcv.partsInCtuWidth : 1; const unsigned uiNumElem = ( edgeDir == EDGE_VER ) ? ( area.height / pcv.minCUHeight ) : ( area.width / pcv.minCUWidth ); unsigned uiBsIdx = getRasterIdx( area, pcv ); for( int ui = 0; ui < uiNumElem; ui++ ) { m_aapbEdgeFilter[edgeDir][uiBsIdx] = bValue; if ( m_aapucBS[edgeDir][uiBsIdx] && bValue ) { m_aapucBS[edgeDir][uiBsIdx] = 3; // both the TU and PU edge } else { if( ! 
EdgeIdx ) { m_aapucBS[edgeDir][uiBsIdx] = bValue; } } uiBsIdx += uiAdd; } } void LoopFilter::xSetLoopfilterParam( const CodingUnit& cu ) { const Slice& slice = *cu.slice; const PPS& pps = *cu.cs->pps; if( slice.getDeblockingFilterDisable() ) { m_stLFCUParam.leftEdge = m_stLFCUParam.topEdge = m_stLFCUParam.internalEdge = false; return; } const Position& pos = cu.blocks[cu.chType].pos(); m_stLFCUParam.internalEdge = true; #if JVET_O1143_LPF_ACROSS_SUBPIC_BOUNDARY m_stLFCUParam.leftEdge = ( 0 < pos.x ) && isAvailableLeft ( cu, *cu.cs->getCU( pos.offset( -1, 0 ), cu.chType ), !pps.getLoopFilterAcrossSlicesEnabledFlag(), !pps.getLoopFilterAcrossTilesEnabledFlag(), !pps.getSubPicFromCU(cu).getloopFilterAcrossEnabledFlag() ); #else m_stLFCUParam.leftEdge = ( 0 < pos.x ) && isAvailableLeft ( cu, *cu.cs->getCU( pos.offset( -1, 0 ), cu.chType ), !pps.getLoopFilterAcrossSlicesEnabledFlag(), !pps.getLoopFilterAcrossTilesEnabledFlag()); #endif #if JVET_O1143_LPF_ACROSS_SUBPIC_BOUNDARY m_stLFCUParam.topEdge = ( 0 < pos.y ) && isAvailableAbove( cu, *cu.cs->getCU( pos.offset( 0, -1 ), cu.chType ), !pps.getLoopFilterAcrossSlicesEnabledFlag(), !pps.getLoopFilterAcrossTilesEnabledFlag(), !pps.getSubPicFromCU(cu).getloopFilterAcrossEnabledFlag() ); #else m_stLFCUParam.topEdge = ( 0 < pos.y ) && isAvailableAbove( cu, *cu.cs->getCU( pos.offset( 0, -1 ), cu.chType ), !pps.getLoopFilterAcrossSlicesEnabledFlag(), !pps.getLoopFilterAcrossTilesEnabledFlag()); #endif } unsigned LoopFilter::xGetBoundaryStrengthSingle ( const CodingUnit& cu, const DeblockEdgeDir edgeDir, const Position& localPos, const ChannelType chType ) const { // The boundary strength that is output by the function xGetBoundaryStrengthSingle is a multi component boundary strength that contains boundary strength for luma (bits 0 to 1), cb (bits 2 to 3) and cr (bits 4 to 5). const Slice& sliceQ = *cu.slice; int shiftHor = cu.Y().valid() ? 
0 : ::getComponentScaleX(COMPONENT_Cb, cu.firstPU->chromaFormat); int shiftVer = cu.Y().valid() ? 0 : ::getComponentScaleY(COMPONENT_Cb, cu.firstPU->chromaFormat); const Position& posQ = Position{ localPos.x >> shiftHor, localPos.y >> shiftVer }; const Position posP = ( edgeDir == EDGE_VER ) ? posQ.offset( -1, 0 ) : posQ.offset( 0, -1 ); const CodingUnit& cuQ = cu; const CodingUnit& cuP = (chType == CHANNEL_TYPE_CHROMA && cuQ.chType == CHANNEL_TYPE_LUMA) ? *cu.cs->getCU(recalcPosition( cu.chromaFormat, CHANNEL_TYPE_LUMA, CHANNEL_TYPE_CHROMA, posP), CHANNEL_TYPE_CHROMA) : *cu.cs->getCU( posP, cu.chType ); //-- Set BS for Intra MB : BS = 4 or 3 if( ( MODE_INTRA == cuP.predMode ) || ( MODE_INTRA == cuQ.predMode ) ) { if( chType == CHANNEL_TYPE_LUMA ) { int bsY = (MODE_INTRA == cuP.predMode && cuP.bdpcmMode) && (MODE_INTRA == cuQ.predMode && cuQ.bdpcmMode) ? 0 : 2; return BsSet(bsY, COMPONENT_Y); } else { int bsC = (MODE_INTRA == cuP.predMode && cuP.bdpcmModeChroma) && (MODE_INTRA == cuQ.predMode && cuQ.bdpcmModeChroma) ? 0 : 2; return (BsSet(bsC, COMPONENT_Cb) + BsSet(bsC, COMPONENT_Cr)); } } const TransformUnit& tuQ = *cuQ.cs->getTU(posQ, cuQ.chType); const TransformUnit& tuP = (cuP.chType == CHANNEL_TYPE_CHROMA && cuQ.chType == CHANNEL_TYPE_LUMA) ? 
*cuP.cs->getTU(recalcPosition( cu.chromaFormat, CHANNEL_TYPE_LUMA, CHANNEL_TYPE_CHROMA, posP), CHANNEL_TYPE_CHROMA) : *cuP.cs->getTU(posP, cuQ.chType); const PreCalcValues& pcv = *cu.cs->pcv; const unsigned rasterIdx = getRasterIdx( Position{ localPos.x, localPos.y }, pcv ); if (m_aapucBS[edgeDir][rasterIdx] && (cuP.firstPU->ciipFlag || cuQ.firstPU->ciipFlag)) { if(chType == CHANNEL_TYPE_LUMA) { return BsSet(2, COMPONENT_Y); } else { return BsSet(2, COMPONENT_Cb) + BsSet(2, COMPONENT_Cr); } } unsigned tmpBs = 0; //-- Set BS for not Intra MB : BS = 2 or 1 or 0 if(chType == CHANNEL_TYPE_LUMA) { // Y if (m_aapucBS[edgeDir][rasterIdx] && (TU::getCbf(tuQ, COMPONENT_Y) || TU::getCbf(tuP, COMPONENT_Y))) { tmpBs += BsSet(1, COMPONENT_Y); } } else { #if JVET_Q0438_MONOCHROME_BUGFIXES if (pcv.chrFormat != CHROMA_400) { #endif // U if (m_aapucBS[edgeDir][rasterIdx] && (TU::getCbf(tuQ, COMPONENT_Cb) || TU::getCbf(tuP, COMPONENT_Cb) || tuQ.jointCbCr || tuP.jointCbCr)) { tmpBs += BsSet(1, COMPONENT_Cb); } // V if (m_aapucBS[edgeDir][rasterIdx] && (TU::getCbf(tuQ, COMPONENT_Cr) || TU::getCbf(tuP, COMPONENT_Cr) || tuQ.jointCbCr || tuP.jointCbCr)) { tmpBs += BsSet(1, COMPONENT_Cr); } #if JVET_Q0438_MONOCHROME_BUGFIXES } #endif } if (BsGet(tmpBs, COMPONENT_Y) == 1) { return tmpBs; } if ((cuP.firstPU->ciipFlag || cuQ.firstPU->ciipFlag)) { return 1; } if ( !cu.Y().valid() ) { return tmpBs; } // and now the pred if ( m_aapucBS[edgeDir][rasterIdx] != 0 && m_aapucBS[edgeDir][rasterIdx] != 3 ) return tmpBs; const Position& lumaPosQ = Position{ localPos.x, localPos.y }; const Position lumaPosP = ( edgeDir == EDGE_VER ) ? lumaPosQ.offset( -1, 0 ) : lumaPosQ.offset( 0, -1 ); const MotionInfo& miQ = cuQ.cs->getMotionInfo( lumaPosQ ); const MotionInfo& miP = cuP.cs->getMotionInfo( lumaPosP ); const Slice& sliceP = *cuP.slice; if (sliceQ.isInterB() || sliceP.isInterB()) { const Picture *piRefP0 = (CU::isIBC(cuP) ? sliceP.getPic() : ((0 > miP.refIdx[0]) ? 
NULL : sliceP.getRefPic(REF_PIC_LIST_0, miP.refIdx[0]))); const Picture *piRefP1 = (CU::isIBC(cuP) ? NULL : ((0 > miP.refIdx[1]) ? NULL : sliceP.getRefPic(REF_PIC_LIST_1, miP.refIdx[1]))); const Picture *piRefQ0 = (CU::isIBC(cuQ) ? sliceQ.getPic() : ((0 > miQ.refIdx[0]) ? NULL : sliceQ.getRefPic(REF_PIC_LIST_0, miQ.refIdx[0]))); const Picture *piRefQ1 = (CU::isIBC(cuQ) ? NULL : ((0 > miQ.refIdx[1]) ? NULL : sliceQ.getRefPic(REF_PIC_LIST_1, miQ.refIdx[1]))); Mv mvP0, mvP1, mvQ0, mvQ1; if( 0 <= miP.refIdx[0] ) { mvP0 = miP.mv[0]; } if( 0 <= miP.refIdx[1] ) { mvP1 = miP.mv[1]; } if( 0 <= miQ.refIdx[0] ) { mvQ0 = miQ.mv[0]; } if( 0 <= miQ.refIdx[1] ) { mvQ1 = miQ.mv[1]; } int nThreshold = (1 << MV_FRACTIONAL_BITS_INTERNAL) >> 1; unsigned uiBs = 0; //th can be optimized if ( ((piRefP0==piRefQ0)&&(piRefP1==piRefQ1)) || ((piRefP0==piRefQ1)&&(piRefP1==piRefQ0)) ) { if ( piRefP0 != piRefP1 ) // Different L0 & L1 { if ( piRefP0 == piRefQ0 ) { uiBs = ((abs(mvQ0.getHor() - mvP0.getHor()) >= nThreshold) || (abs(mvQ0.getVer() - mvP0.getVer()) >= nThreshold) || (abs(mvQ1.getHor() - mvP1.getHor()) >= nThreshold) || (abs(mvQ1.getVer() - mvP1.getVer()) >= nThreshold)) ? 1 : 0; } else { uiBs = ((abs(mvQ1.getHor() - mvP0.getHor()) >= nThreshold) || (abs(mvQ1.getVer() - mvP0.getVer()) >= nThreshold) || (abs(mvQ0.getHor() - mvP1.getHor()) >= nThreshold) || (abs(mvQ0.getVer() - mvP1.getVer()) >= nThreshold)) ? 1 : 0; } } else // Same L0 & L1 { uiBs = ((abs(mvQ0.getHor() - mvP0.getHor()) >= nThreshold) || (abs(mvQ0.getVer() - mvP0.getVer()) >= nThreshold) || (abs(mvQ1.getHor() - mvP1.getHor()) >= nThreshold) || (abs(mvQ1.getVer() - mvP1.getVer()) >= nThreshold)) && ((abs(mvQ1.getHor() - mvP0.getHor()) >= nThreshold) || (abs(mvQ1.getVer() - mvP0.getVer()) >= nThreshold) || (abs(mvQ0.getHor() - mvP1.getHor()) >= nThreshold) || (abs(mvQ0.getVer() - mvP1.getVer()) >= nThreshold)) ? 
1 : 0;
      }
    }
    else // for all different Ref_Idx
    {
      uiBs = 1;
    }
    return uiBs + tmpBs;
  }

  // pcSlice->isInterP()
  // Uni-prediction case: only list-0 motion is compared across the edge.
  CHECK(CU::isInter(cuP) && 0 > miP.refIdx[0], "Invalid reference picture list index");
  CHECK(CU::isInter(cuP) && 0 > miQ.refIdx[0], "Invalid reference picture list index");
  // IBC blocks reference the current picture itself.
  const Picture *piRefP0 = (CU::isIBC(cuP) ? sliceP.getPic() : sliceP.getRefPic(REF_PIC_LIST_0, miP.refIdx[0]));
  const Picture *piRefQ0 = (CU::isIBC(cuQ) ? sliceQ.getPic() : sliceQ.getRefPic(REF_PIC_LIST_0, miQ.refIdx[0]));
  // Different reference pictures on the two sides: raise the boundary strength.
  if (piRefP0 != piRefQ0)
  {
    return tmpBs + 1;
  }

  Mv mvP0 = miP.mv[0];
  Mv mvQ0 = miQ.mv[0];

  // Same reference: filter only when the MV difference reaches half a luma sample.
  int nThreshold = (1 << MV_FRACTIONAL_BITS_INTERNAL) >> 1;
  return ( ( abs( mvQ0.getHor() - mvP0.getHor() ) >= nThreshold ) || ( abs( mvQ0.getVer() - mvP0.getVer() ) >= nThreshold ) ) ? (tmpBs + 1) : tmpBs;
}

#if LUMA_ADAPTIVE_DEBLOCKING_FILTER_QP_OFFSET
// Derive the luma-adaptive deblocking (LADF) QP offset for the current edge.
// Averages four reconstructed samples adjacent to the edge to estimate the
// local luma level, then selects the QP offset of the last LADF interval whose
// lower bound is exceeded by that level.
void LoopFilter::deriveLADFShift( const Pel* src, const int stride, int& shift, const DeblockEdgeDir edgeDir, const SPS sps )
{
  uint32_t lumaLevel = 0;
  shift = sps.getLadfQpOffset(0);

  if (edgeDir == EDGE_VER)
  {
    lumaLevel = (src[0] + src[3*stride] + src[-1] + src[3*stride - 1]) >> 2;
  }
  else // (edgeDir == EDGE_HOR)
  {
    lumaLevel = (src[0] + src[3] + src[-stride] + src[-stride + 3]) >> 2;
  }

  // Intervals are ordered by lower bound; keep the offset of the last interval entered.
  for ( int k = 1; k < sps.getLadfNumIntervals(); k++ )
  {
    const int th = sps.getLadfIntervalLowerBound( k );
    if ( lumaLevel > th )
    {
      shift = sps.getLadfQpOffset( k );
    }
    else
    {
      break;
    }
  }
}
#endif

// Deblock one luma edge (vertical or horizontal, selected by edgeDir) of the
// given CU. iEdge is the edge index in minCU units inside the CU; filtering is
// applied on the 4x4 deblocking grid only.
void LoopFilter::xEdgeFilterLuma( const CodingUnit& cu, const DeblockEdgeDir edgeDir, const int iEdge )
{
  const CompArea&      lumaArea = cu.block(COMPONENT_Y);
  const PreCalcValues& pcv      = *cu.cs->pcv;

  // When invoked from the encoder, filter the encoder-side buffer instead of the reco buffer.
  PelBuf        picYuvRec = m_enc ? m_encPicYuvBuffer.getBuf( lumaArea ) : cu.cs->getRecoBuf( lumaArea );
  Pel           *piSrc    = picYuvRec.buf;
  const int     iStride   = picYuvRec.stride;
  Pel           *piTmpSrc = piSrc;
  const PPS     &pps      = *(cu.cs->pps);
  const SPS     &sps      = *(cu.cs->sps);
  const Slice   &slice    = *(cu.slice);
  const bool    spsPaletteEnabledFlag = sps.getPLTMode();
  const int     bitDepthLuma          = sps.getBitDepth(CHANNEL_TYPE_LUMA);
  const ClpRng& clpRng( cu.cs->slice->clpRng(COMPONENT_Y) );

  int      iQP        = 0;
  unsigned uiNumParts = ( ( ( edgeDir == EDGE_VER ) ? lumaArea.height / pcv.minCUHeight : lumaArea.width / pcv.minCUWidth ) );
  int      pelsInPart = pcv.minCUWidth;
  unsigned uiBsAbsIdx = 0, uiBs = 0;
  int      iOffset, iSrcStep;

  bool bPartPNoFilter = false;
  bool bPartQNoFilter = false;
  int  betaOffsetDiv2 = slice.getDeblockingFilterBetaOffsetDiv2();
  int  tcOffsetDiv2   = slice.getDeblockingFilterTcOffsetDiv2();
  int  xoffset, yoffset;

  Position pos;

  if (edgeDir == EDGE_VER)
  {
    xoffset   = 0;
    yoffset   = pelsInPart;
    iOffset   = 1;
    iSrcStep  = iStride;
    piTmpSrc += iEdge * pelsInPart;
    pos       = Position{ lumaArea.x + iEdge * pelsInPart, lumaArea.y - yoffset };
  }
  else // (edgeDir == EDGE_HOR)
  {
    xoffset   = pelsInPart;
    yoffset   = 0;
    iOffset   = iStride;
    iSrcStep  = 1;
    piTmpSrc += iEdge*pelsInPart*iStride;
    pos       = Position{ lumaArea.x - xoffset, lumaArea.y + iEdge * pelsInPart };
  }

  const int iBitdepthScale = 1 << (bitDepthLuma - 8);

  // dec pos since within the loop we first calc the pos
  for( int iIdx = 0; iIdx < uiNumParts; iIdx++ )
  {
    pos.x += xoffset;
    pos.y += yoffset;

    // Deblock luma boundaries on 4x4 grid only
    if (edgeDir == EDGE_HOR && (pos.y % 4) != 0)
    {
      continue;
    }
    if (edgeDir == EDGE_VER && (pos.x % 4) != 0)
    {
      continue;
    }
    uiBsAbsIdx = getRasterIdx( pos, pcv );
    uiBs = BsGet(m_aapucBS[edgeDir][uiBsAbsIdx], COMPONENT_Y);

    if( uiBs )
    {
      const CodingUnit& cuQ = cu;
      const CodingUnit& cuP = *cu.cs->getCU(pos.offset(xoffset - pelsInPart, yoffset - pelsInPart), cu.chType);

      // Derive neighboring PU index
      // If the neighbour may not be filtered across (slice/tile/subpicture
      // boundary), clear the stored boundary strength and skip this part.
      if (edgeDir == EDGE_VER)
      {
#if JVET_O1143_LPF_ACROSS_SUBPIC_BOUNDARY
        if (!isAvailableLeft(cu, cuP, !pps.getLoopFilterAcrossSlicesEnabledFlag(), !pps.getLoopFilterAcrossTilesEnabledFlag(), !pps.getSubPicFromCU(cu).getloopFilterAcrossEnabledFlag()))
#else
        if (!isAvailableLeft(cu, cuP, !pps.getLoopFilterAcrossSlicesEnabledFlag(), !pps.getLoopFilterAcrossTilesEnabledFlag()))
#endif
        {
          m_aapucBS[edgeDir][uiBsAbsIdx] = uiBs = 0;
          continue;
        }
      }
      else // (iDir == EDGE_HOR)
      {
#if JVET_O1143_LPF_ACROSS_SUBPIC_BOUNDARY
        if (!isAvailableAbove(cu, cuP, !pps.getLoopFilterAcrossSlicesEnabledFlag(), !pps.getLoopFilterAcrossTilesEnabledFlag(), !pps.getSubPicFromCU(cu).getloopFilterAcrossEnabledFlag()))
#else
        if (!isAvailableAbove(cu, cuP, !pps.getLoopFilterAcrossSlicesEnabledFlag(), !pps.getLoopFilterAcrossTilesEnabledFlag()))
#endif
        {
          m_aapucBS[edgeDir][uiBsAbsIdx] = uiBs = 0;
          continue;
        }
      }

      // Average the QPs of the two CUs sharing the edge.
      iQP = (cuP.qp + cuQ.qp + 1) >> 1;

#if LUMA_ADAPTIVE_DEBLOCKING_FILTER_QP_OFFSET
      if ( sps.getLadfEnabled() )
      {
        int iShift = 0;
        deriveLADFShift( piTmpSrc + iSrcStep * (iIdx*pelsInPart), iStride, iShift, edgeDir, sps );
        iQP += iShift;
      }
#endif

      bool sidePisLarge = false;
      bool sideQisLarge = false;
      int maxFilterLengthP = m_maxFilterLengthP[COMPONENT_Y][pos.x-m_ctuXLumaSamples][pos.y-m_ctuYLumaSamples];
      int maxFilterLengthQ = m_maxFilterLengthQ[COMPONENT_Y][pos.x-m_ctuXLumaSamples][pos.y-m_ctuYLumaSamples];
      if (maxFilterLengthP > 3)
      {
        sidePisLarge = true;
        if ( maxFilterLengthP > 5 )
        {
          // restrict filter length if sub-blocks are used (e.g affine or ATMVP)
          if (cuP.affine)
          {
            maxFilterLengthP = std::min(maxFilterLengthP, 5);
          }
        }
      }
      if (maxFilterLengthQ > 3)
      {
        sideQisLarge = true;
      }

      // No long filter towards the upper CTU row (line-buffer restriction).
      if (edgeDir == EDGE_HOR && pos.y % slice.getSPS()->getCTUSize() == 0)
      {
        sidePisLarge = false;
      }

      const int iIndexTC = Clip3(0, MAX_QP + DEFAULT_INTRA_TC_OFFSET, int(iQP + DEFAULT_INTRA_TC_OFFSET*(uiBs - 1) + (tcOffsetDiv2 << 1)));
      const int iIndexB  = Clip3(0, MAX_QP, iQP + (betaOffsetDiv2 << 1));

      // Table values are defined for 10-bit data; rescale to the coded bit depth.
      const int iTc   = bitDepthLuma < 10 ? ((sm_tcTable[iIndexTC] + 2) >> (10 - bitDepthLuma)) : ((sm_tcTable[iIndexTC]) << (bitDepthLuma - 10));
      const int iBeta = sm_betaTable[iIndexB ] * iBitdepthScale;
      const int iSideThreshold = ( iBeta + ( iBeta >> 1 ) ) >> 3;
      const int iThrCut = iTc * 10;

      const unsigned uiBlocksInPart = pelsInPart / 4 ? pelsInPart / 4 : 1;

      for( int iBlkIdx = 0; iBlkIdx < uiBlocksInPart; iBlkIdx++ )
      {
        // Gradient activity on the first and last line of the 4-sample segment.
        const int dp0 = xCalcDP(piTmpSrc + iSrcStep*(iIdx*pelsInPart + iBlkIdx * 4 + 0), iOffset);
        const int dq0 = xCalcDQ(piTmpSrc + iSrcStep*(iIdx*pelsInPart + iBlkIdx * 4 + 0), iOffset);
        const int dp3 = xCalcDP(piTmpSrc + iSrcStep*(iIdx*pelsInPart + iBlkIdx * 4 + 3), iOffset);
        const int dq3 = xCalcDQ(piTmpSrc + iSrcStep*(iIdx*pelsInPart + iBlkIdx * 4 + 3), iOffset);
        int dp0L = dp0;
        int dq0L = dq0;
        int dp3L = dp3;
        int dq3L = dq3;

        // For large sides, include activity measured three samples deeper.
        if (sidePisLarge)
        {
          dp0L = (dp0L + xCalcDP(piTmpSrc + iSrcStep*(iIdx*pelsInPart + iBlkIdx * 4 + 0) - 3 * iOffset, iOffset) + 1) >> 1;
          dp3L = (dp3L + xCalcDP(piTmpSrc + iSrcStep*(iIdx*pelsInPart + iBlkIdx * 4 + 3) - 3 * iOffset, iOffset) + 1) >> 1;
        }
        if (sideQisLarge)
        {
          dq0L = (dq0L + xCalcDQ(piTmpSrc + iSrcStep*(iIdx*pelsInPart + iBlkIdx * 4 + 0) + 3 * iOffset, iOffset) + 1) >> 1;
          dq3L = (dq3L + xCalcDQ(piTmpSrc + iSrcStep*(iIdx*pelsInPart + iBlkIdx * 4 + 3) + 3 * iOffset, iOffset) + 1) >> 1;
        }

        bool useLongtapFilter = false;
        if (sidePisLarge || sideQisLarge)
        {
          int d0L = dp0L + dq0L;
          int d3L = dp3L + dq3L;

          int dpL = dp0L + dp3L;
          int dqL = dq0L + dq3L;

          int dL = d0L + d3L;

          bPartPNoFilter = bPartQNoFilter = false;
          if (spsPaletteEnabledFlag)
          {
            // check if each of PUs is palette coded
            bPartPNoFilter = bPartPNoFilter || CU::isPLT(cuP);
            bPartQNoFilter = bPartQNoFilter || CU::isPLT(cuQ);
          }

          if (dL < iBeta)
          {
            const bool filterP = (dpL < iSideThreshold);
            const bool filterQ = (dqL < iSideThreshold);

            Pel* src0 = piTmpSrc + iSrcStep * (iIdx*pelsInPart + iBlkIdx * 4 + 0);
            Pel* src3 = piTmpSrc + iSrcStep * (iIdx*pelsInPart + iBlkIdx * 4 + 3);

            // adjust decision so that it is not read beyond p5 is maxFilterLengthP is 5 and q5 if maxFilterLengthQ is 5
            const bool swL = xUseStrongFiltering(src0, iOffset, 2 * d0L, iBeta, iTc, sidePisLarge, sideQisLarge, maxFilterLengthP, maxFilterLengthQ)
                          && xUseStrongFiltering(src3, iOffset, 2 * d3L, iBeta, iTc, sidePisLarge, sideQisLarge, maxFilterLengthP, maxFilterLengthQ);
            if (swL)
            {
              useLongtapFilter = true;
              for (int i = 0; i < DEBLOCK_SMALLEST_BLOCK / 2; i++)
              {
                xPelFilterLuma(piTmpSrc + iSrcStep*(iIdx*pelsInPart + iBlkIdx * 4 + i), iOffset, iTc, swL, bPartPNoFilter, bPartQNoFilter, iThrCut, filterP, filterQ, clpRng, sidePisLarge, sideQisLarge, maxFilterLengthP, maxFilterLengthQ);
              }
            }
          }
        }
        if (!useLongtapFilter)
        {
          // Normal (short-tap) filter path.
          const int d0 = dp0 + dq0;
          const int d3 = dp3 + dq3;

          const int dp = dp0 + dp3;
          const int dq = dq0 + dq3;
          const int d  = d0 + d3;

          bPartPNoFilter = bPartQNoFilter = false;
          if( spsPaletteEnabledFlag)
          {
            // check if each of PUs is palette coded
            bPartPNoFilter = bPartPNoFilter || CU::isPLT(cuP);
            bPartQNoFilter = bPartQNoFilter || CU::isPLT(cuQ);
          }

          if( d < iBeta )
          {
            bool bFilterP = false;
            bool bFilterQ = false;
            if (maxFilterLengthP > 1 && maxFilterLengthQ > 1)
            {
              bFilterP = (dp < iSideThreshold);
              bFilterQ = (dq < iSideThreshold);
            }
            bool sw = false;
            if (maxFilterLengthP > 2 && maxFilterLengthQ > 2)
            {
              sw = xUseStrongFiltering(piTmpSrc + iSrcStep * (iIdx*pelsInPart + iBlkIdx * 4 + 0), iOffset, 2 * d0, iBeta, iTc)
                && xUseStrongFiltering(piTmpSrc + iSrcStep * (iIdx*pelsInPart + iBlkIdx * 4 + 3), iOffset, 2 * d3, iBeta, iTc);
            }
            for( int i = 0; i < DEBLOCK_SMALLEST_BLOCK / 2; i++ )
            {
              xPelFilterLuma( piTmpSrc + iSrcStep*( iIdx*pelsInPart + iBlkIdx * 4 + i ), iOffset, iTc, sw, bPartPNoFilter, bPartQNoFilter, iThrCut, bFilterP, bFilterQ, clpRng );
            }
          }
        }
      }
    }
  }
}

void LoopFilter::xEdgeFilterChroma(const CodingUnit& cu, const DeblockEdgeDir edgeDir, const int iEdge)
{
  const Position lumaPos = cu.Y().valid() ?
cu.Y().pos() : recalcPosition( cu.chromaFormat, cu.chType, CHANNEL_TYPE_LUMA, cu.blocks[cu.chType].pos() ); const Size lumaSize = cu.Y().valid() ? cu.Y().size() : recalcSize( cu.chromaFormat, cu.chType, CHANNEL_TYPE_LUMA, cu.blocks[cu.chType].size() ); const PreCalcValues& pcv = *cu.cs->pcv; unsigned rasterIdx = getRasterIdx( lumaPos, pcv ); PelBuf picYuvRecCb = m_enc ? m_encPicYuvBuffer.getBuf(cu.block(COMPONENT_Cb)) : cu.cs->getRecoBuf(cu.block(COMPONENT_Cb)); PelBuf picYuvRecCr = m_enc ? m_encPicYuvBuffer.getBuf(cu.block(COMPONENT_Cr)) : cu.cs->getRecoBuf(cu.block(COMPONENT_Cr)); Pel *piSrcCb = picYuvRecCb.buf; Pel *piSrcCr = picYuvRecCr.buf; const int iStride = picYuvRecCb.stride; const SPS &sps = *cu.cs->sps; const PPS &pps = *cu.cs->pps; const Slice &slice = *cu.slice; const ChromaFormat nChromaFormat = sps.getChromaFormatIdc(); const unsigned uiPelsInPartChromaH = pcv.minCUWidth >> ::getComponentScaleX(COMPONENT_Cb, nChromaFormat); const unsigned uiPelsInPartChromaV = pcv.minCUHeight >> ::getComponentScaleY(COMPONENT_Cb, nChromaFormat); int iOffset, iSrcStep; unsigned uiLoopLength; bool bPartPNoFilter = false; bool bPartQNoFilter = false; #if JVET_Q0121_DEBLOCKING_CONTROL_PARAMETERS const int tcOffsetDiv2[2] = { slice.getDeblockingFilterCbTcOffsetDiv2(), slice.getDeblockingFilterCrTcOffsetDiv2() }; const int betaOffsetDiv2[2] = { slice.getDeblockingFilterCbBetaOffsetDiv2(), slice.getDeblockingFilterCrBetaOffsetDiv2() }; #else const int tcOffsetDiv2 = slice.getDeblockingFilterTcOffsetDiv2(); const int betaOffsetDiv2 = slice.getDeblockingFilterBetaOffsetDiv2(); #endif // Vertical Position unsigned uiEdgeNumInCtuVert = rasterIdx % pcv.partsInCtuWidth + iEdge; unsigned uiEdgeNumInCtuHor = rasterIdx / pcv.partsInCtuWidth + iEdge; if( ( uiPelsInPartChromaH < DEBLOCK_SMALLEST_BLOCK ) && ( uiPelsInPartChromaV < DEBLOCK_SMALLEST_BLOCK ) && ( ( ( uiEdgeNumInCtuVert % ( DEBLOCK_SMALLEST_BLOCK / uiPelsInPartChromaH ) ) && ( edgeDir == EDGE_VER ) ) || ( ( 
uiEdgeNumInCtuHor % ( DEBLOCK_SMALLEST_BLOCK / uiPelsInPartChromaV ) ) && ( edgeDir == EDGE_HOR ) ) ) ) { return; } unsigned uiNumParts = ( edgeDir == EDGE_VER ) ? lumaSize.height / pcv.minCUHeight : lumaSize.width / pcv.minCUWidth ; int uiNumPelsLuma = pcv.minCUWidth; unsigned uiBsAbsIdx; unsigned bS[2]; Pel* piTmpSrcCb = piSrcCb; Pel* piTmpSrcCr = piSrcCr; int xoffset, yoffset; Position pos( lumaPos.x, lumaPos.y ); if( edgeDir == EDGE_VER ) { xoffset = 0; yoffset = uiNumPelsLuma; iOffset = 1; iSrcStep = iStride; piTmpSrcCb += iEdge*uiPelsInPartChromaH; piTmpSrcCr += iEdge*uiPelsInPartChromaH; uiLoopLength = uiPelsInPartChromaV; pos = Position{ lumaPos.x + iEdge*uiNumPelsLuma, lumaPos.y - yoffset }; } else // (edgeDir == EDGE_HOR) { xoffset = uiNumPelsLuma; yoffset = 0; iOffset = iStride; iSrcStep = 1; piTmpSrcCb += iEdge*iStride*uiPelsInPartChromaV; piTmpSrcCr += iEdge*iStride*uiPelsInPartChromaV; uiLoopLength = uiPelsInPartChromaH; pos = Position{ lumaPos.x - xoffset, lumaPos.y + iEdge*uiNumPelsLuma }; } const int iBitdepthScale = 1 << (sps.getBitDepth(CHANNEL_TYPE_CHROMA) - 8); for( int iIdx = 0; iIdx < uiNumParts; iIdx++ ) { pos.x += xoffset; pos.y += yoffset; uiBsAbsIdx = getRasterIdx( pos, pcv ); unsigned tmpBs = m_aapucBS[edgeDir][uiBsAbsIdx]; tmpBs = m_aapucBS[edgeDir][uiBsAbsIdx]; bS[0] = BsGet(tmpBs, COMPONENT_Cb); bS[1] = BsGet(tmpBs, COMPONENT_Cr); if (bS[0] > 0 || bS[1] > 0) { const CodingUnit& cuQ = cu; CodingUnit& cuP1 = *cu.cs->getCU( recalcPosition( cu.chromaFormat, CHANNEL_TYPE_LUMA, cu.chType, pos.offset( xoffset - uiNumPelsLuma, yoffset - uiNumPelsLuma ) ), cu.chType ); CodingUnit& cuP = *cu.cs->getCU( recalcPosition( cu.chromaFormat, CHANNEL_TYPE_LUMA, (cuP1.isSepTree() ? CHANNEL_TYPE_CHROMA : cu.chType), pos.offset( xoffset - uiNumPelsLuma, yoffset - uiNumPelsLuma ) ), (cuP1.isSepTree() ? 
CHANNEL_TYPE_CHROMA : cu.chType)); if (edgeDir == EDGE_VER) { #if JVET_O1143_LPF_ACROSS_SUBPIC_BOUNDARY CHECK(!isAvailableLeft(cu, cuP, !pps.getLoopFilterAcrossSlicesEnabledFlag(), !pps.getLoopFilterAcrossTilesEnabledFlag(), !pps.getSubPicFromCU(cu).getloopFilterAcrossEnabledFlag()), "Neighbour not available"); #else CHECK(!isAvailableLeft(cu, cuP, !pps.getLoopFilterAcrossSlicesEnabledFlag(), !pps.getLoopFilterAcrossTilesEnabledFlag()), "Neighbour not available"); #endif } else // (iDir == EDGE_HOR) { #if JVET_O1143_LPF_ACROSS_SUBPIC_BOUNDARY CHECK(!isAvailableAbove(cu, cuP, !pps.getLoopFilterAcrossSlicesEnabledFlag(), !pps.getLoopFilterAcrossTilesEnabledFlag(), !pps.getSubPicFromCU(cu).getloopFilterAcrossEnabledFlag()), "Neighbour not available"); #else CHECK(!isAvailableAbove(cu, cuP, !pps.getLoopFilterAcrossSlicesEnabledFlag(), !pps.getLoopFilterAcrossTilesEnabledFlag()), "Neighbour not available"); #endif } bPartPNoFilter = bPartQNoFilter = false; if ( sps.getPLTMode()) { // check if each of PUs is palette coded bPartPNoFilter = bPartPNoFilter || CU::isPLT(cuP); bPartQNoFilter = bPartQNoFilter || CU::isPLT(cuQ); } const int maxFilterLengthP = m_maxFilterLengthP[COMPONENT_Cb][(pos.x-m_ctuXLumaSamples)>>m_shiftHor][(pos.y-m_ctuYLumaSamples)>>m_shiftVer]; const int maxFilterLengthQ = m_maxFilterLengthQ[COMPONENT_Cb][(pos.x-m_ctuXLumaSamples)>>m_shiftHor][(pos.y-m_ctuYLumaSamples)>>m_shiftVer]; bool largeBoundary = false; bool isChromaHorCTBBoundary = false; if ( maxFilterLengthP >= 3 && maxFilterLengthQ >= 3 ) { largeBoundary = true; } if (edgeDir == EDGE_HOR && pos.y % cuP.slice->getSPS()->getCTUSize() == 0) { isChromaHorCTBBoundary = true; } for( int chromaIdx = 0; chromaIdx < 2; chromaIdx++ ) { if ((bS[chromaIdx] == 2) || (largeBoundary && (bS[chromaIdx] == 1))) { const ClpRng& clpRng( cu.cs->slice->clpRng( ComponentID( chromaIdx + 1 )) ); Pel* piTmpSrcChroma = (chromaIdx == 0) ? 
piTmpSrcCb : piTmpSrcCr; const TransformUnit& tuQ = *cuQ.cs->getTU(recalcPosition( cu.chromaFormat, CHANNEL_TYPE_LUMA, CHANNEL_TYPE_CHROMA, pos), CHANNEL_TYPE_CHROMA); const TransformUnit& tuP = *cuP.cs->getTU(recalcPosition( cu.chromaFormat, CHANNEL_TYPE_LUMA, CHANNEL_TYPE_CHROMA, (edgeDir == EDGE_VER) ? pos.offset(-1, 0) : pos.offset(0, -1)), CHANNEL_TYPE_CHROMA); #if JVET_Q0820_ACT const QpParam cQP(tuP, ComponentID(chromaIdx + 1), -MAX_INT, false); const QpParam cQQ(tuQ, ComponentID(chromaIdx + 1), -MAX_INT, false); #else const QpParam cQP(tuP, ComponentID(chromaIdx + 1)); const QpParam cQQ(tuQ, ComponentID(chromaIdx + 1)); #endif const int qpBdOffset = tuP.cs->sps->getQpBDOffset(toChannelType(ComponentID(chromaIdx + 1))); int baseQp_P = cQP.Qp(0) - qpBdOffset; int baseQp_Q = cQQ.Qp(0) - qpBdOffset; int iQP = ((baseQp_Q + baseQp_P + 1) >> 1); #if JVET_Q0121_DEBLOCKING_CONTROL_PARAMETERS const int iIndexTC = Clip3<int>(0, MAX_QP + DEFAULT_INTRA_TC_OFFSET, iQP + DEFAULT_INTRA_TC_OFFSET * (bS[chromaIdx] - 1) + (tcOffsetDiv2[chromaIdx] << 1)); #else const int iIndexTC = Clip3<int>(0, MAX_QP + DEFAULT_INTRA_TC_OFFSET, iQP + DEFAULT_INTRA_TC_OFFSET * (bS[chromaIdx] - 1) + (tcOffsetDiv2 << 1)); #endif const int iTc = sps.getBitDepth(CHANNEL_TYPE_CHROMA) < 10 ? ((sm_tcTable[iIndexTC] + 2) >> (10 - sps.getBitDepth(CHANNEL_TYPE_CHROMA))) : ((sm_tcTable[iIndexTC]) << (sps.getBitDepth(CHANNEL_TYPE_CHROMA) - 10)); bool useLongFilter = false; if (largeBoundary) { #if JVET_Q0121_DEBLOCKING_CONTROL_PARAMETERS const int indexB = Clip3<int>(0, MAX_QP, iQP + (betaOffsetDiv2[chromaIdx] << 1)); #else const int indexB = Clip3<int>(0, MAX_QP, iQP + (betaOffsetDiv2 << 1)); #endif const int beta = sm_betaTable[indexB] * iBitdepthScale; const int dp0 = xCalcDP(piTmpSrcChroma + iSrcStep*(iIdx*uiLoopLength + 0), iOffset, isChromaHorCTBBoundary); const int dq0 = xCalcDQ(piTmpSrcChroma + iSrcStep*(iIdx*uiLoopLength + 0), iOffset); const int subSamplingShift = ( edgeDir == EDGE_VER ) ? 
m_shiftVer : m_shiftHor; const int dp3 = (subSamplingShift == 1) ? xCalcDP(piTmpSrcChroma + iSrcStep*(iIdx*uiLoopLength + 1), iOffset, isChromaHorCTBBoundary) : xCalcDP(piTmpSrcChroma + iSrcStep*(iIdx*uiLoopLength + 3), iOffset, isChromaHorCTBBoundary); const int dq3 = ( subSamplingShift == 1 ) ? xCalcDQ(piTmpSrcChroma + iSrcStep*(iIdx*uiLoopLength + 1), iOffset) : xCalcDQ(piTmpSrcChroma + iSrcStep*(iIdx*uiLoopLength + 3), iOffset); const int d0 = dp0 + dq0; const int d3 = dp3 + dq3; const int d = d0 + d3; if (d < beta) { useLongFilter = true; const bool sw = xUseStrongFiltering(piTmpSrcChroma + iSrcStep*(iIdx*uiLoopLength + 0), iOffset, 2 * d0, beta, iTc, false, false, 7, 7, isChromaHorCTBBoundary) && xUseStrongFiltering(piTmpSrcChroma + iSrcStep*(iIdx*uiLoopLength + ((subSamplingShift == 1) ? 1 : 3)), iOffset, 2 * d3, beta, iTc, false, false, 7, 7, isChromaHorCTBBoundary); for (unsigned step = 0; step < uiLoopLength; step++) { xPelFilterChroma(piTmpSrcChroma + iSrcStep*(step + iIdx*uiLoopLength), iOffset, iTc, sw, bPartPNoFilter, bPartQNoFilter, clpRng, largeBoundary, isChromaHorCTBBoundary); } } } if ( !useLongFilter ) { for (unsigned step = 0; step < uiLoopLength; step++) { xPelFilterChroma(piTmpSrcChroma + iSrcStep*(step + iIdx*uiLoopLength), iOffset, iTc, false, bPartPNoFilter, bPartQNoFilter, clpRng, largeBoundary, isChromaHorCTBBoundary); } } } } } } } /** - Deblocking for the luminance component with strong or weak filter . 
\param piSrc           pointer to picture data
\param iOffset         offset value for picture data
\param tc              tc value
\param sw              decision strong/weak filter
\param bPartPNoFilter  indicator to disable filtering on partP
\param bPartQNoFilter  indicator to disable filtering on partQ
\param iThrCut         threshold value for weak filter decision
\param bFilterSecondP  decision weak filter/no filter for partP
\param bFilterSecondQ  decision weak filter/no filter for partQ
\param bitDepthLuma    luma bit depth
*/
// Bilinear long-tap filter core: blends each of the numberPSide / numberQSide
// samples towards refMiddle, clipped to +-(tc * position-dependent weight)/2.
inline void LoopFilter::xBilinearFilter(Pel* srcP, Pel* srcQ, int offset, int refMiddle, int refP, int refQ, int numberPSide, int numberQSide, const int* dbCoeffsP, const int* dbCoeffsQ, int tc) const
{
  int src;
  // Position-dependent tc weights for 7-sample and 3-sample sides.
  const char tc7[7] = { 6, 5, 4, 3, 2, 1, 1};
  const char tc3[3] = { 6, 4, 2 };
  const char *tcP = (numberPSide == 3) ? tc3 : tc7;
  const char *tcQ = (numberQSide == 3) ? tc3 : tc7;
  for (int pos = 0; pos < numberPSide; pos++)
  {
    src = srcP[-offset*pos];
    int cvalue = (tc * tcP[pos]) >>1;
    srcP[-offset * pos] = Clip3(src - cvalue, src + cvalue, ((refMiddle*dbCoeffsP[pos] + refP * (64 - dbCoeffsP[pos]) + 32) >> 6));
  }
  for (int pos = 0; pos < numberQSide; pos++)
  {
    src = srcQ[offset*pos];
    int cvalue = (tc * tcQ[pos]) >> 1;
    srcQ[offset*pos] = Clip3(src - cvalue, src + cvalue, ((refMiddle*dbCoeffsQ[pos] + refQ * (64 - dbCoeffsQ[pos]) + 32) >> 6));
  }
}

// Long-tap (large-block) luma filtering of one line across the edge.
// numberPSide / numberQSide are 3, 5 or 7; at least one side is > 3.
// Derives the side references (refP/refQ), the middle reference (refMiddle)
// depending on the P/Q length combination, then applies xBilinearFilter.
inline void LoopFilter::xFilteringPandQ(Pel* src, int offset, int numberPSide, int numberQSide, int tc) const
{
  CHECK(numberPSide <= 3 && numberQSide <= 3, "Short filtering in long filtering function");
  Pel* srcP = src-offset;
  Pel* srcQ = src;

  int refP = 0;
  int refQ = 0;
  int refMiddle = 0;

  const int dbCoeffs7[7] = { 59, 50, 41,32,23,14,5 };
  const int dbCoeffs3[3] = { 53, 32, 11 };
  const int dbCoeffs5[5] = { 58, 45, 32,19,6};
  const int* dbCoeffsP   = numberPSide == 7 ? dbCoeffs7 : (numberPSide==5) ? dbCoeffs5 : dbCoeffs3;
  const int* dbCoeffsQ   = numberQSide == 7 ? dbCoeffs7 : (numberQSide==5) ? dbCoeffs5 : dbCoeffs3;

  // Side references: average of the two outermost samples used on each side.
  switch (numberPSide)
  {
    case 7: refP = (srcP[-6*offset]   + srcP[-7 * offset] + 1) >> 1; break;
    case 3: refP = (srcP[-2 * offset] + srcP[-3 * offset] + 1) >> 1; break;
    case 5: refP = (srcP[-4 * offset] + srcP[-5 * offset] + 1) >> 1; break;
  }

  switch (numberQSide)
  {
    case 7: refQ = (srcQ[6 * offset] + srcQ[7 * offset] + 1) >> 1; break;
    case 3: refQ = (srcQ[2 * offset] + srcQ[3 * offset] + 1) >> 1; break;
    case 5: refQ = (srcQ[4 * offset] + srcQ[5 * offset] + 1) >> 1; break;
  }

  if (numberPSide == numberQSide)
  {
    if (numberPSide == 5)
    {
      refMiddle = (2 * (srcP[0] + srcQ[0] + srcP[-offset] + srcQ[offset] + srcP[-2 * offset] + srcQ[2 * offset]) + srcP[-3 * offset] + srcQ[3 * offset] + srcP[-4 * offset] + srcQ[4 * offset] + 8) >> 4;
    }
    else
    {
      refMiddle = (2 * (srcP[0] + srcQ[0]) + srcP[-offset] + srcQ[offset] + srcP[-2 * offset] + srcQ[2 * offset] + srcP[-3 * offset] + srcQ[3 * offset] + srcP[-4 * offset] + srcQ[4 * offset] + srcP[-5 * offset] + srcQ[5 * offset] + +srcP[-6 * offset] + srcQ[6 * offset] + 8) >> 4;
    }
  }
  else
  {
    // Asymmetric lengths: normalise so that the longer side is treated as "P".
    Pel* srcPt = srcP;
    Pel* srcQt = srcQ;
    int offsetP = -offset;
    int offsetQ = offset;

    int newNumberQSide = numberQSide;
    int newNumberPSide = numberPSide;
    if (numberQSide > numberPSide)
    {
      std::swap(srcPt, srcQt);
      std::swap(offsetP, offsetQ);
      newNumberQSide = numberPSide;
      newNumberPSide = numberQSide;
    }

    if (newNumberPSide == 7 && newNumberQSide == 5)
    {
      // symmetric in P/Q, so the un-swapped pointers can be used here
      refMiddle = (2 * (srcP[0] + srcQ[0] + srcP[-offset] + srcQ[offset]) + srcP[-2 * offset] + srcQ[2 * offset] + srcP[-3 * offset] + srcQ[3 * offset] + srcP[-4 * offset] + srcQ[4 * offset] + srcP[-5 * offset] + srcQ[5 * offset] + 8) >> 4;
    }
    else if (newNumberPSide == 7 && newNumberQSide == 3)
    {
      refMiddle = (2 * (srcPt[0] + srcQt[0]) + srcQt[0] + 2 * (srcQt[offsetQ] + srcQt[2 * offsetQ]) + srcPt[offsetP] + srcQt[offsetQ] + srcPt[2 * offsetP] + srcPt[3 * offsetP] + srcPt[4 * offsetP] + srcPt[5 * offsetP] + srcPt[6 * offsetP] + 8) >> 4;
    }
    else //if (newNumberPSide == 5 && newNumberQSide == 3)
    {
      refMiddle = (srcP[0] + srcQ[0] + srcP[-offset] + srcQ[offset] + srcP[-2 * offset] + srcQ[2 * offset] + srcP[-3 * offset] + srcQ[3 * offset] + 4) >> 3;
    }
  }
  xBilinearFilter(srcP,srcQ,offset,refMiddle,refP,refQ,numberPSide,numberQSide,dbCoeffsP,dbCoeffsQ,tc);
}

// Filter one line of luma samples across the edge. Selects between the
// long-tap filter (large blocks), the strong 3+3 filter, and the weak filter,
// and finally restores the side samples if that side must not be filtered
// (e.g. palette-coded CU).
inline void LoopFilter::xPelFilterLuma(Pel* piSrc, const int iOffset, const int tc, const bool sw, const bool bPartPNoFilter, const bool bPartQNoFilter, const int iThrCut, const bool bFilterSecondP, const bool bFilterSecondQ, const ClpRng& clpRng, bool sidePisLarge, bool sideQisLarge, int maxFilterLengthP, int maxFilterLengthQ) const
{
  int delta;

  // Samples around the edge: m3|m4 is the p0|q0 boundary pair.
  const Pel m4  = piSrc[ 0          ];
  const Pel m3  = piSrc[-iOffset    ];
  const Pel m5  = piSrc[ iOffset    ];
  const Pel m2  = piSrc[-iOffset * 2];
  const Pel m6  = piSrc[ iOffset * 2];
  const Pel m1  = piSrc[-iOffset * 3];
  const Pel m7  = piSrc[ iOffset * 3];
  const Pel m0  = piSrc[-iOffset * 4];

  const Pel mP1 = piSrc[-iOffset * 5];
  const Pel mP2 = piSrc[-iOffset * 6];
  const Pel mP3 = piSrc[-iOffset * 7];
  const Pel m8  = piSrc[ iOffset * 4];
  const Pel m9  = piSrc[ iOffset * 5];
  const Pel m10 = piSrc[ iOffset * 6];
  const char tc3[3] = { 3, 2, 1};

  if (sw)
  {
    if (sidePisLarge || sideQisLarge)
    {
      // Long-tap filter; a non-large side is filtered with length 3.
      xFilteringPandQ(piSrc, iOffset, sidePisLarge ? maxFilterLengthP : 3, sideQisLarge ? maxFilterLengthQ : 3, tc);
    }
    else
    {
      // Strong filter: modify three samples on each side.
      piSrc[-iOffset]     = Clip3(m3 - tc3[0] * tc, m3 + tc3[0] * tc, ((m1 + 2 * m2 + 2 * m3 + 2 * m4 + m5 + 4) >> 3));
      piSrc[0]            = Clip3(m4 - tc3[0] * tc, m4 + tc3[0] * tc, ((m2 + 2 * m3 + 2 * m4 + 2 * m5 + m6 + 4) >> 3));
      piSrc[-iOffset * 2] = Clip3(m2 - tc3[1] * tc, m2 + tc3[1] * tc, ((m1 + m2 + m3 + m4 + 2) >> 2));
      piSrc[iOffset]      = Clip3(m5 - tc3[1] * tc, m5 + tc3[1] * tc, ((m3 + m4 + m5 + m6 + 2) >> 2));
      piSrc[-iOffset * 3] = Clip3(m1 - tc3[2] * tc, m1 + tc3[2] * tc, ((2 * m0 + 3 * m1 + m2 + m3 + m4 + 4) >> 3));
      piSrc[iOffset * 2]  = Clip3(m6 - tc3[2] * tc, m6 + tc3[2] * tc, ((m3 + m4 + m5 + 3 * m6 + 2 * m7 + 4) >> 3));
    }
  }
  else
  {
    /* Weak filter */
    delta = ( 9 * ( m4 - m3 ) - 3 * ( m5 - m2 ) + 8 ) >> 4;

    if ( abs(delta) < iThrCut )
    {
      delta = Clip3( -tc, tc, delta );
      piSrc[-iOffset] = ClipPel( m3 + delta, clpRng);
      piSrc[0]        = ClipPel( m4 - delta, clpRng);

      const int tc2 = tc >> 1;
      if( bFilterSecondP )
      {
        const int delta1 = Clip3( -tc2, tc2, ( ( ( ( m1 + m3 + 1 ) >> 1 ) - m2 + delta ) >> 1 ) );
        piSrc[-iOffset * 2] = ClipPel( m2 + delta1, clpRng);
      }
      if( bFilterSecondQ )
      {
        const int delta2 = Clip3( -tc2, tc2, ( ( ( ( m6 + m4 + 1 ) >> 1 ) - m5 - delta ) >> 1 ) );
        piSrc[iOffset] = ClipPel( m5 + delta2, clpRng);
      }
    }
  }

  // Restore unfiltered samples on sides that must not be filtered.
  if(bPartPNoFilter)
  {
    piSrc[-iOffset    ] = m3;
    piSrc[-iOffset * 2] = m2;
    piSrc[-iOffset * 3] = m1;
    if (sidePisLarge)
    {
      piSrc[-iOffset * 4] = m0;
      piSrc[-iOffset * 5] = mP1;
      piSrc[-iOffset * 6] = mP2;
      piSrc[-iOffset * 7] = mP3;
    }
  }

  if(bPartQNoFilter)
  {
    piSrc[ 0          ] = m4;
    piSrc[ iOffset    ] = m5;
    piSrc[ iOffset * 2] = m6;
    if (sideQisLarge)
    {
      piSrc[iOffset * 3] = m7;
      piSrc[iOffset * 4] = m8;
      piSrc[iOffset * 5] = m9;
      piSrc[iOffset * 6] = m10;
    }
  }
}

/** - Deblocking of one line/column for the chrominance component .
\param piSrc           pointer to picture data
\param iOffset         offset value for picture data
\param tc              tc value
\param bPartPNoFilter  indicator to disable filtering on partP
\param bPartQNoFilter  indicator to disable filtering on partQ
\param bitDepthChroma  chroma bit depth
*/
// Filter one line of chroma samples across the edge: either the strong
// (long) chroma filter on large boundaries, or the normal 1+1 filter.
// At a horizontal chroma CTB boundary the strong filter uses a variant
// that does not read above p1 (line-buffer restriction).
inline void LoopFilter::xPelFilterChroma(Pel* piSrc, const int iOffset, const int tc, const bool sw, const bool bPartPNoFilter, const bool bPartQNoFilter, const ClpRng& clpRng, const bool largeBoundary, const bool isChromaHorCTBBoundary) const
{
  int delta;

  // Samples around the edge: m3|m4 is the p0|q0 boundary pair.
  const Pel m0 = piSrc[-iOffset * 4];
  const Pel m1 = piSrc[-iOffset * 3];
  const Pel m2 = piSrc[-iOffset * 2];
  const Pel m3 = piSrc[-iOffset];
  const Pel m4 = piSrc[0];
  const Pel m5 = piSrc[iOffset];
  const Pel m6 = piSrc[iOffset * 2];
  const Pel m7 = piSrc[iOffset * 3];

  if (sw)
  {
    if (isChromaHorCTBBoundary)
    {
      // CTB-boundary variant: does not access p2/p3 above the CTU row.
      piSrc[-iOffset * 1] = Clip3(m3 - tc, m3 + tc, ((3 * m2 + 2 * m3 + m4 + m5 + m6 + 4) >> 3));       // p0
      piSrc[0]            = Clip3(m4 - tc, m4 + tc, ((2 * m2 + m3 + 2 * m4 + m5 + m6 + m7 + 4) >> 3));  // q0
      piSrc[iOffset * 1]  = Clip3(m5 - tc, m5 + tc, ((m2 + m3 + m4 + 2 * m5 + m6 + 2 * m7 + 4) >> 3));  // q1
      piSrc[iOffset * 2]  = Clip3(m6 - tc, m6 + tc, ((m3 + m4 + m5 + 2 * m6 + 3 * m7 + 4) >> 3));       // q2
    }
    else
    {
      piSrc[-iOffset * 3] = Clip3(m1 - tc, m1 + tc, ((3 * m0 + 2 * m1 + m2 + m3 + m4 + 4) >> 3));       // p2
      piSrc[-iOffset * 2] = Clip3(m2 - tc, m2 + tc, ((2 * m0 + m1 + 2 * m2 + m3 + m4 + m5 + 4) >> 3));  // p1
      piSrc[-iOffset * 1] = Clip3(m3 - tc, m3 + tc, ((m0 + m1 + m2 + 2 * m3 + m4 + m5 + m6 + 4) >> 3)); // p0
      piSrc[0]            = Clip3(m4 - tc, m4 + tc, ((m1 + m2 + m3 + 2 * m4 + m5 + m6 + m7 + 4) >> 3)); // q0
      piSrc[iOffset * 1]  = Clip3(m5 - tc, m5 + tc, ((m2 + m3 + m4 + 2 * m5 + m6 + 2 * m7 + 4) >> 3));  // q1
      piSrc[iOffset * 2]  = Clip3(m6 - tc, m6 + tc, ((m3 + m4 + m5 + 2 * m6 + 3 * m7 + 4) >> 3));       // q2
    }
  }
  else
  {
    // Normal chroma filter: modify only p0 and q0.
    delta           = Clip3(-tc, tc, ((((m4 - m3) << 2) + m2 - m5 + 4) >> 3));
    piSrc[-iOffset] = ClipPel(m3 + delta, clpRng);
    piSrc[0]        = ClipPel(m4 - delta, clpRng);
  }

  // Restore unfiltered samples on sides that must not be filtered.
  if( bPartPNoFilter )
  {
    if (largeBoundary)
    {
      piSrc[-iOffset * 3] = m1; // p2
      piSrc[-iOffset * 2] = m2; // p1
    }
    piSrc[-iOffset] = m3;
  }
  if( bPartQNoFilter )
  {
    if (largeBoundary)
    {
      piSrc[iOffset * 1] = m5; // q1
      piSrc[iOffset * 2] = m6; // q2
    }
    piSrc[ 0 ] = m4;
  }
}

/** - Decision between strong and weak filter .
\param offset  offset value for picture data
\param d       d value
\param beta    beta value
\param tc      tc value
\param piSrc   pointer to picture data
*/
// Strong/weak filter decision for one line. For large blocks the flatness
// measures sp3/sq3 are extended with samples deeper inside the block and the
// thresholds are tightened; at a chroma horizontal CTB boundary p3 is
// replaced by p1 so no sample above the CTU row is read.
inline bool LoopFilter::xUseStrongFiltering(Pel* piSrc, const int iOffset, const int d, const int beta, const int tc, bool sidePisLarge, bool sideQisLarge, int maxFilterLengthP, int maxFilterLengthQ, bool isChromaHorCTBBoundary) const
{
  const Pel m4 = piSrc[ 0          ];
  const Pel m3 = piSrc[-iOffset    ];
  const Pel m7 = piSrc[ iOffset * 3];
  const Pel m0 = piSrc[-iOffset * 4];
  const Pel m2 = piSrc[-iOffset * 2];
  int       sp3 = abs(m0 - m3);
  if (isChromaHorCTBBoundary)
  {
    sp3 = abs(m2 - m3);
  }
  int       sq3      = abs(m7 - m4);
  const int d_strong = sp3 + sq3;

  if (sidePisLarge || sideQisLarge)
  {
    Pel mP4;
    Pel m11;
#if JVET_Q0054
    if (sidePisLarge)
    {
      if (maxFilterLengthP == 7)
      {
        const Pel mP5 = piSrc[-iOffset * 5];
        const Pel mP6 = piSrc[-iOffset * 6];
        const Pel mP7 = piSrc[-iOffset * 7];;

        mP4 = piSrc[-iOffset * 8];
        sp3 = sp3 + abs(mP5 - mP6 - mP7 + mP4);
      }
      else
      {
        mP4 = piSrc[-iOffset * 6];
      }
      sp3 = (sp3 + abs(m0 - mP4) + 1) >> 1;
    }
    if (sideQisLarge)
    {
      if (maxFilterLengthQ == 7)
      {
        const Pel m8  = piSrc[iOffset * 4];
        const Pel m9  = piSrc[iOffset * 5];
        const Pel m10 = piSrc[iOffset * 6];;

        m11 = piSrc[iOffset * 7];
        sq3 = sq3 + abs(m8 - m9 - m10 + m11);
      }
      else
      {
        m11 = piSrc[iOffset * 5];
      }
      sq3 = (sq3 + abs(m11 - m7) + 1) >> 1;
    }
    return ((sp3 + sq3) < (beta*3 >> 5)) && (d < (beta >> 4)) && (abs(m3 - m4) < ((tc * 5 + 1) >> 1));
#else
    if (maxFilterLengthP == 5)
    {
      mP4 = piSrc[-iOffset * 6];
    }
    else
    {
      mP4 = piSrc[-iOffset * 8];
    }
    if (maxFilterLengthQ == 5)
    {
      m11 = piSrc[iOffset * 5];
    }
    else
    {
      m11 = piSrc[iOffset * 7];
    }
    if (sidePisLarge)
    {
      sp3 = (sp3 + abs(m0 - mP4) + 1) >> 1;
    }
    if (sideQisLarge)
    {
      sq3 = (sq3 + abs(m11 - m7) + 1) >> 1;
    }
    return ((sp3 + sq3) < (beta*3 >> 5)) && (d < (beta >> 2)) && (abs(m3 - m4) < ((tc * 5 + 1) >> 1));
#endif
  }
  else
  return ( ( d_strong < ( beta >> 3 ) ) && ( d < ( beta >> 2 ) ) && ( abs( m3 - m4 ) < ( ( tc * 5 + 1 ) >> 1 ) ) );
}

// Second-derivative activity on the P side (|p2 - 2*p1 + p0|). At a chroma
// horizontal CTB boundary, p2 is replaced by p1 so nothing above the CTU row
// is read (the expression then reduces to |p0 - p1|).
inline int LoopFilter::xCalcDP(Pel* piSrc, const int iOffset, const bool isChromaHorCTBBoundary) const
{
  if (isChromaHorCTBBoundary)
  {
    return abs(piSrc[-iOffset * 2] - 2 * piSrc[-iOffset * 2] + piSrc[-iOffset]);
  }
  else
  {
    return abs(piSrc[-iOffset * 3] - 2 * piSrc[-iOffset * 2] + piSrc[-iOffset]);
  }
}

// Second-derivative activity on the Q side (|q0 - 2*q1 + q2|).
inline int LoopFilter::xCalcDQ( Pel* piSrc, const int iOffset ) const
{
  return abs( piSrc[0] - 2 * piSrc[iOffset] + piSrc[iOffset * 2] );
}

// Pack a per-component boundary-strength value (2 bits per component).
inline unsigned LoopFilter::BsSet(unsigned val, const ComponentID compIdx) const { return (val << (compIdx << 1)); }
// Extract the 2-bit boundary-strength value of one component.
inline unsigned LoopFilter::BsGet(unsigned val, const ComponentID compIdx) const { return ((val >> (compIdx << 1)) & 3); }

//! \}
/*
 * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/Function.h>
#include <AK/LexicalPath.h>
#include <LibCore/File.h>
#include <LibCore/StandardPaths.h>
#include <LibGUI/Action.h>
#include <LibGUI/BoxLayout.h>
#include <LibGUI/Button.h>
#include <LibGUI/CommonLocationsProvider.h>
#include <LibGUI/FileIconProvider.h>
#include <LibGUI/FilePicker.h>
#include <LibGUI/FilePickerDialogGML.h>
#include <LibGUI/FileSystemModel.h>
#include <LibGUI/InputBox.h>
#include <LibGUI/Menu.h>
#include <LibGUI/MessageBox.h>
#include <LibGUI/MultiView.h>
#include <LibGUI/SortingProxyModel.h>
#include <LibGUI/TextBox.h>
#include <LibGUI/Toolbar.h>
#include <LibGfx/FontDatabase.h>
#include <LibGfx/Palette.h>
#include <string.h>

namespace GUI {

// Shows a modal "Open" (or "Open folder") dialog rooted at `path` and returns
// the selected absolute path, or an empty Optional if the user cancelled.
Optional<String> FilePicker::get_open_filepath(Window* parent_window, const String& window_title, const StringView& path, bool folder)
{
    auto picker = FilePicker::construct(parent_window, folder ? Mode::OpenFolder : Mode::Open, "", path);

    if (!window_title.is_null())
        picker->set_title(window_title);

    if (picker->exec() == Dialog::ExecOK) {
        String file_path = picker->selected_file();

        if (file_path.is_null())
            return {};

        return file_path;
    }
    return {};
}

// Shows a modal "Save as" dialog pre-filled with "<title>.<extension>" and
// returns the chosen path, or an empty Optional if the user cancelled.
Optional<String> FilePicker::get_save_filepath(Window* parent_window, const String& title, const String& extension, const StringView& path)
{
    auto picker = FilePicker::construct(parent_window, Mode::Save, String::formatted("{}.{}", title, extension), path);

    if (picker->exec() == Dialog::ExecOK) {
        String file_path = picker->selected_file();

        if (file_path.is_null())
            return {};

        return file_path;
    }
    return {};
}

// Builds the dialog UI from GML, wires up toolbar actions, the file view,
// the common-locations tray and the OK/Cancel buttons, then navigates to
// the initial `path`. In Save mode the filename textbox is pre-filled with
// `filename`.
FilePicker::FilePicker(Window* parent_window, Mode mode, const StringView& filename, const StringView& path)
    : Dialog(parent_window)
    , m_model(FileSystemModel::create())
    , m_mode(mode)
{
    switch (m_mode) {
    case Mode::Open:
    case Mode::OpenMultiple:
    case Mode::OpenFolder:
        set_title("Open");
        set_icon(Gfx::Bitmap::load_from_file("/res/icons/16x16/open.png"));
        break;
    case Mode::Save:
        set_title("Save as");
        set_icon(Gfx::Bitmap::load_from_file("/res/icons/16x16/save.png"));
        break;
    }
    resize(560, 320);

    auto& widget = set_main_widget<GUI::Widget>();
    if (!widget.load_from_gml(file_picker_dialog_gml))
        VERIFY_NOT_REACHED();

    auto& toolbar = *widget.find_descendant_of_type_named<GUI::Toolbar>("toolbar");
    toolbar.set_has_frame(false);

    m_location_textbox = *widget.find_descendant_of_type_named<GUI::TextBox>("location_textbox");
    m_location_textbox->set_text(path);

    m_view = *widget.find_descendant_of_type_named<GUI::MultiView>("view");
    // Only OpenMultiple allows selecting more than one entry.
    m_view->set_selection_mode(m_mode == Mode::OpenMultiple ? GUI::AbstractView::SelectionMode::MultiSelection : GUI::AbstractView::SelectionMode::SingleSelection);
    m_view->set_model(SortingProxyModel::create(*m_model));
    m_view->set_model_column(FileSystemModel::Column::Name);
    m_view->set_key_column_and_sort_order(GUI::FileSystemModel::Column::Name, GUI::SortOrder::Ascending);
    m_view->set_column_visible(FileSystemModel::Column::Owner, true);
    m_view->set_column_visible(FileSystemModel::Column::Group, true);
    m_view->set_column_visible(FileSystemModel::Column::Permissions, true);
    m_view->set_column_visible(FileSystemModel::Column::Inode, true);
    m_view->set_column_visible(FileSystemModel::Column::SymlinkTarget, true);

    m_model->register_client(*this);

    m_location_textbox->on_return_pressed = [this] {
        set_path(m_location_textbox->text());
    };

    auto open_parent_directory_action = Action::create(
        "Open parent directory", { Mod_Alt, Key_Up }, Gfx::Bitmap::load_from_file("/res/icons/16x16/open-parent-directory.png"), [this](const Action&) {
            set_path(String::formatted("{}/..", m_model->root_path()));
        },
        this);
    toolbar.add_action(*open_parent_directory_action);

    auto go_home_action = CommonActions::make_go_home_action([this](auto&) {
        set_path(Core::StandardPaths::home_directory());
    },
        this);
    toolbar.add_action(go_home_action);
    toolbar.add_separator();

    auto mkdir_action = Action::create(
        "New directory...", Gfx::Bitmap::load_from_file("/res/icons/16x16/mkdir.png"), [this](const Action&) {
            String value;
            if (InputBox::show(this, value, "Enter name:", "New directory") == InputBox::ExecOK && !value.is_empty()) {
                auto new_dir_path = LexicalPath::canonicalized_path(String::formatted("{}/{}", m_model->root_path(), value));
                int rc = mkdir(new_dir_path.characters(), 0777);
                if (rc < 0) {
                    MessageBox::show(this, String::formatted("mkdir(\"{}\") failed: {}", new_dir_path, strerror(errno)), "Error", MessageBox::Type::Error);
                } else {
                    m_model->update();
                }
            }
        },
        this);
    toolbar.add_action(*mkdir_action);
    toolbar.add_separator();

    toolbar.add_action(m_view->view_as_icons_action());
    toolbar.add_action(m_view->view_as_table_action());
    toolbar.add_action(m_view->view_as_columns_action());

    m_filename_textbox = *widget.find_descendant_of_type_named<GUI::TextBox>("filename_textbox");
    m_filename_textbox->set_focus(true);
    if (m_mode == Mode::Save) {
        m_filename_textbox->set_text(filename);
        m_filename_textbox->select_all();
    }
    m_filename_textbox->on_return_pressed = [&] {
        on_file_return();
    };

    m_view->on_selection_change = [this] {
        auto index = m_view->selection().first();
        auto& filter_model = (SortingProxyModel&)*m_view->model();
        auto local_index = filter_model.map_to_source(index);
        const FileSystemModel::Node& node = m_model->node(local_index);
        // FIX: dropped an unused `LexicalPath path { node.full_path() }` local
        // that was constructed here but never read.
        // Mirror the selection into the filename box only when the selected
        // entry matches what this dialog is picking (folder vs. file).
        auto should_open_folder = m_mode == Mode::OpenFolder;
        if (should_open_folder == node.is_directory()) {
            m_filename_textbox->set_text(node.name);
        } else if (m_mode != Mode::Save) {
            m_filename_textbox->clear();
        }
    };

    m_context_menu = GUI::Menu::construct();
    m_context_menu->add_action(GUI::Action::create_checkable("Show dotfiles", [&](auto& action) {
        m_model->set_should_show_dotfiles(action.is_checked());
        m_model->update();
    }));

    m_view->on_context_menu_request = [&](const GUI::ModelIndex& index, const GUI::ContextMenuEvent& event) {
        // Only show the background context menu (not on top of an entry).
        if (!index.is_valid()) {
            m_context_menu->popup(event.screen_position());
        }
    };

    auto& ok_button = *widget.find_descendant_of_type_named<GUI::Button>("ok_button");
    ok_button.set_text(ok_button_name(m_mode));
    ok_button.on_click = [this](auto) {
        on_file_return();
    };

    auto& cancel_button = *widget.find_descendant_of_type_named<GUI::Button>("cancel_button");
    cancel_button.set_text("Cancel");
    cancel_button.on_click = [this](auto) {
        done(ExecCancel);
    };

    m_view->on_activation = [this](auto& index) {
        auto& filter_model = (SortingProxyModel&)*m_view->model();
        auto local_index = filter_model.map_to_source(index);
        const FileSystemModel::Node& node = m_model->node(local_index);
        auto path = node.full_path();

        if (node.is_directory() || node.is_symlink_to_directory()) {
            set_path(path);
            // NOTE: 'node' is invalid from here on
        } else {
            on_file_return();
        }
    };

    auto& common_locations_frame = *widget.find_descendant_of_type_named<Frame>("common_locations_frame");
    common_locations_frame.set_background_role(Gfx::ColorRole::Tray);
    m_model->on_complete = [&] {
        // Keep the tray button matching the current directory checked.
        // FIX: iterate by reference — the original copied each entry per
        // iteration (performance-for-range-copy).
        for (auto& location_button : m_common_location_buttons)
            location_button.button.set_checked(m_model->root_path() == location_button.path);
    };
    for (auto& location : CommonLocationsProvider::common_locations()) {
        String path = location.path;
        auto& button = common_locations_frame.add<GUI::Button>();
        button.set_button_style(Gfx::ButtonStyle::Tray);
        button.set_foreground_role(Gfx::ColorRole::TrayText);
        button.set_text_alignment(Gfx::TextAlignment::CenterLeft);
        button.set_text(location.name);
        button.set_icon(FileIconProvider::icon_for_path(path).bitmap_for_size(16));
        button.set_fixed_height(22);
        button.set_checkable(true);
        button.set_exclusive(true);
        button.on_click = [this, path](auto) {
            set_path(path);
        };
        m_common_location_buttons.append({ path, button });
    }

    set_path(path);
}

FilePicker::~FilePicker()
{
    m_model->unregister_client(*this);
}

// FileSystemModel client hook: keep the location box in sync with the model.
void FilePicker::model_did_update(unsigned)
{
    m_location_textbox->set_text(m_model->root_path());
}

// Validates the filename textbox contents and closes the dialog with ExecOK.
// Relative names are resolved against the model's root path; Open modes
// require the target to exist, Save mode confirms overwriting.
void FilePicker::on_file_return()
{
    auto path = m_filename_textbox->text();
    if (!path.starts_with('/')) {
        path = LexicalPath::join(m_model->root_path(), path).string();
    }

    bool file_exists = Core::File::exists(path);

    if (!file_exists && (m_mode == Mode::Open || m_mode == Mode::OpenFolder)) {
        MessageBox::show(this, String::formatted("No such file or directory: {}", m_filename_textbox->text()), "File not found", MessageBox::Type::Error, MessageBox::InputType::OK);
        return;
    }

    if (file_exists && m_mode == Mode::Save) {
        auto result = MessageBox::show(this, "File already exists. Overwrite?", "Existing File", MessageBox::Type::Warning, MessageBox::InputType::OKCancel);
        if (result == MessageBox::ExecCancel)
            return;
    }

    m_selected_file = path;
    done(ExecOK);
}

// Navigates the model (and the location box icon) to a canonicalized `path`.
void FilePicker::set_path(const String& path)
{
    auto new_path = LexicalPath(path).string();
    m_location_textbox->set_icon(FileIconProvider::icon_for_path(new_path).bitmap_for_size(16));
    m_model->set_root_path(new_path);
}

}
#include <iostream>

// Demonstrates dynamic allocation, release, and why a pointer must never be
// dereferenced after the memory it points to has been freed.
int main()
{
    // Declare a pointer and allocate an int on the heap.
    int* p1 = new int;
    *p1 = 1000;
    std::cout << *p1 << std::endl;

    // Release it once.
    delete p1;

    // Re-allocate through the same pointer.
    p1 = new int;
    *p1 = 2000;
    std::cout << *p1 << std::endl;

    // A second pointer can alias the same allocation and release it --
    // but after `delete p2`, BOTH p1 and p2 are dangling.
    int* p2 = p1;
    delete p2;

    // BUG FIX: the original printed *p1 here. Reading through a pointer to
    // freed memory is undefined behavior -- it may appear to "still work"
    // (the old value is often still in the freed block), but the program is
    // not allowed to rely on it. Null out the dangling aliases instead so
    // any accidental dereference fails loudly rather than silently.
    p1 = nullptr;
    p2 = nullptr;

    return 0;
}
/**
 * @file llagentwearables.cpp
 * @brief LLAgentWearables class implementation
 *
 * $LicenseInfo:firstyear=2001&license=viewerlgpl$
 * Second Life Viewer Source Code
 * Copyright (C) 2010, Linden Research, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License only.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
 *
 * Linden Research, Inc., 945 Battery Street, San Francisco, CA  94111  USA
 * $/LicenseInfo$
 */

#include "llviewerprecompiledheaders.h"
#include "llagentwearables.h"

#include "llaccordionctrltab.h"
#include "llagent.h"
#include "llagentcamera.h"
#include "llagentwearablesfetch.h"
#include "llappearancemgr.h"
#include "llcallbacklist.h"
#include "llfloatersidepanelcontainer.h"
#include "llgesturemgr.h"
#include "llinventorybridge.h"
#include "llinventoryfunctions.h"
#include "llinventoryobserver.h"
#include "llinventorypanel.h"
#include "lllocaltextureobject.h"
#include "llmd5.h"
#include "llnotificationsutil.h"
#include "lloutfitobserver.h"
#include "llsidepanelappearance.h"
#include "lltexlayer.h"
#include "lltooldraganddrop.h"
#include "llviewerregion.h"
#include "llvoavatarself.h"
#include "llviewerwearable.h"
#include "llwearablelist.h"
// [RLVa:KB] - Checked: 2011-05-22 (RLVa-1.3.1)
#include "rlvhandler.h"
#include "rlvlocks.h"
// [/RLVa:KB]

#include <boost/scoped_ptr.hpp>

// Global singleton holding the agent's currently worn wearables.
LLAgentWearables gAgentWearables;

BOOL LLAgentWearables::mInitialWearablesUpdateReceived = FALSE;
// [SL:KB] - Patch: Appearance-InitialWearablesLoadedCallback | Checked: 2010-08-14 (Catznip-2.1)
bool LLAgentWearables::mInitialWearablesLoaded = false;
// [/SL:KB]

using namespace LLAvatarAppearanceDefines;

///////////////////////////////////////////////////////////////////////////////
// Callback to wear and start editing an item that has just been created.
void wear_and_edit_cb(const LLUUID& inv_item)
{
    if (inv_item.isNull()) return;

    // Request editing the item after it gets worn.
    gAgentWearables.requestEditingWearable(inv_item);

    // Wear it.
    LLAppearanceMgr::instance().wearItemOnAvatar(inv_item);
}

///////////////////////////////////////////////////////////////////////////////
// HACK: For EXT-3923: Pants item shows in inventory with skin icon and messes with "current look"
// Some db items are corrupted, have inventory flags = 0, implying wearable type = shape, even though
// wearable type stored in asset is some other value.
// Calling this function whenever a wearable is added to increase visibility if this problem
// turns up in other inventories.
void checkWearableAgainstInventory(LLViewerWearable *wearable)
{
    if (wearable->getItemID().isNull()) return;

    // Check for wearable type consistent with inventory item wearable type.
    LLViewerInventoryItem *item = gInventory.getItem(wearable->getItemID());
    if (item)
    {
        if (!item->isWearableType())
        {
            LL_WARNS() << "wearable associated with non-wearable item" << LL_ENDL;
        }
        if (item->getWearableType() != wearable->getType())
        {
            LL_WARNS() << "type mismatch: wearable " << wearable->getName()
                       << " has type " << wearable->getType()
                       << " but inventory item " << item->getName()
                       << " has type " << item->getWearableType() << LL_ENDL;
        }
    }
    else
    {
        LL_WARNS() << "wearable inventory item not found" << wearable->getName()
                   << " itemID " << wearable->getItemID().asString() << LL_ENDL;
    }
}

// Logs every worn wearable (per type/index) plus pending item updates.
void LLAgentWearables::dump()
{
    LL_INFOS() << "LLAgentWearablesDump" << LL_ENDL;
    for (S32 i = 0; i < LLWearableType::WT_COUNT; i++)
    {
        U32 count = getWearableCount((LLWearableType::EType)i);
        LL_INFOS() << "Type: " << i << " count " << count << LL_ENDL;
        for (U32 j=0; j<count; j++)
        {
            LLViewerWearable* wearable = getViewerWearable((LLWearableType::EType)i,j);
            if (wearable == NULL)
            {
                LL_INFOS() << " " << j << " NULL wearable" << LL_ENDL;
            }
            // NOTE(review): when wearable is NULL the code above only logs and
            // then still dereferences it below — potential crash; confirm.
            LL_INFOS() << " " << j << " Name " << wearable->getName()
                       << " description " << wearable->getDescription() << LL_ENDL;
        }
    }
    LL_INFOS() << "Total items awaiting wearable update " << mItemsAwaitingWearableUpdate.size() << LL_ENDL;
    for (std::set<LLUUID>::iterator it = mItemsAwaitingWearableUpdate.begin(); it != mItemsAwaitingWearableUpdate.end(); ++it)
    {
        LL_INFOS() << (*it).asString() << LL_ENDL;
    }
}

// RAII helper: dumps the agent's wearables on construction and destruction,
// bracketing a scope for before/after comparison in logs.
struct LLAgentDumper
{
    LLAgentDumper(std::string name):
        mName(name)
    {
        LL_INFOS() << LL_ENDL;
        LL_INFOS() << "LLAgentDumper " << mName << LL_ENDL;
        gAgentWearables.dump();
    }

    ~LLAgentDumper()
    {
        LL_INFOS() << LL_ENDL;
        LL_INFOS() << "~LLAgentDumper " << mName << LL_ENDL;
        gAgentWearables.dump();
    }

    std::string mName;
};

LLAgentWearables::LLAgentWearables() :
    LLWearableData(),
    mWearablesLoaded(FALSE)
,   mCOFChangeInProgress(false)
{
}

LLAgentWearables::~LLAgentWearables()
{
    cleanup();
}

void LLAgentWearables::cleanup()
{
}

// static
void LLAgentWearables::initClass()
{
    // this can not be called from constructor because its instance is global and is created too early.
    // Subscribe to "COF is Saved" signal to notify observers about this (Loading indicator for ex.).
    LLOutfitObserver::instance().addCOFSavedCallback(boost::bind(&LLAgentWearables::notifyLoadingFinished, &gAgentWearables));
}

// Hooks this object up to the freshly created self-avatar and kicks off the
// initial server request for the agent's wearables.
void LLAgentWearables::setAvatarObject(LLVOAvatarSelf *avatar)
{
    llassert(avatar);
    avatar->outputRezTiming("Sending wearables request");
    sendAgentWearablesRequest();
    setAvatarAppearance(avatar);
}

// wearables
// Fires the "standard wearables all created" completion when the last
// reference to the callback goes away.
LLAgentWearables::createStandardWearablesAllDoneCallback::~createStandardWearablesAllDoneCallback()
{
    LL_INFOS() << "destructor - all done?" << LL_ENDL;
    gAgentWearables.createStandardWearablesAllDone();
}

// Sends the wearables update when the last reference to the callback dies.
LLAgentWearables::sendAgentWearablesUpdateCallback::~sendAgentWearablesUpdateCallback()
{
    gAgentWearables.sendAgentWearablesUpdate();
}

/**
 * @brief Construct a callback for dealing with the wearables.
 *
 * Would like to pass the agent in here, but we can't safely
 * count on it being around later.  Just use gAgent directly.
 * @param cb callback to execute on completion (??? unused ???)
 * @param type Type for the wearable in the agent
 * @param index Slot index within that type (multi-wearable support)
 * @param wearable The wearable data.
 * @param todo Bitmask of actions to take on completion.
 * @param description Description to attach when the item is linked into the COF.
 */
LLAgentWearables::addWearableToAgentInventoryCallback::addWearableToAgentInventoryCallback(
    LLPointer<LLRefCount> cb, LLWearableType::EType type, U32 index, LLViewerWearable* wearable, U32 todo, const std::string description) :
    mType(type),
    mIndex(index),
    mWearable(wearable),
    mTodo(todo),
    mCB(cb),
    mDescription(description)
{
    LL_INFOS() << "constructor" << LL_ENDL;
}

// Inventory-creation completion: records the new item id on the wearable and
// performs whichever follow-up actions were requested in the mTodo bitmask.
void LLAgentWearables::addWearableToAgentInventoryCallback::fire(const LLUUID& inv_item)
{
    if (mTodo & CALL_CREATESTANDARDDONE)
    {
        LL_INFOS() << "callback fired, inv_item " << inv_item.asString() << LL_ENDL;
    }

    if (inv_item.isNull())
        return;

    gAgentWearables.addWearabletoAgentInventoryDone(mType, mIndex, inv_item, mWearable);

    if (mTodo & CALL_UPDATE)
    {
        gAgentWearables.sendAgentWearablesUpdate();
    }
    if (mTodo & CALL_RECOVERDONE)
    {
        LLAppearanceMgr::instance().addCOFItemLink(inv_item,false);
        gAgentWearables.recoverMissingWearableDone();
    }
    /*
     * Do this for every one in the loop
     */
    if (mTodo & CALL_CREATESTANDARDDONE)
    {
        LLAppearanceMgr::instance().addCOFItemLink(inv_item,false);
        gAgentWearables.createStandardWearablesDone(mType, mIndex);
    }
    if (mTodo & CALL_MAKENEWOUTFITDONE)
    {
        gAgentWearables.makeNewOutfitDone(mType, mIndex);
    }
    if (mTodo & CALL_WEARITEM)
    {
        LLAppearanceMgr::instance().addCOFItemLink(inv_item, true, NULL, mDescription);
    }
}

// Binds a newly created inventory item to its wearable slot and pushes the
// wearable's asset/transaction ids onto the inventory item.
void LLAgentWearables::addWearabletoAgentInventoryDone(const LLWearableType::EType type,
                                                       const U32 index,
                                                       const LLUUID& item_id,
                                                       LLViewerWearable* wearable)
{
    LL_INFOS() << "type " << type << " index " << index << " item " << item_id.asString() << LL_ENDL;

    if (item_id.isNull())
        return;

    LLUUID old_item_id = getWearableItemID(type,index);

    if (wearable)
    {
        wearable->setItemID(item_id);

        if (old_item_id.notNull())
        {
            gInventory.addChangedMask(LLInventoryObserver::LABEL, old_item_id);
            setWearable(type,index,wearable);
        }
        else
        {
            pushWearable(type,wearable);
        }
    }

    gInventory.addChangedMask(LLInventoryObserver::LABEL, item_id);

    LLViewerInventoryItem* item = gInventory.getItem(item_id);
    if (item && wearable)
    {
        // We're changing the asset id, so we both need to set it
        // locally via setAssetUUID() and via setTransactionID() which
        // will be decoded on the server. JC
        item->setAssetUUID(wearable->getAssetID());
        item->setTransactionID(wearable->getTransactionID());
        gInventory.addChangedMask(LLInventoryObserver::INTERNAL, item_id);
        item->updateServer(FALSE);
    }
    gInventory.notifyObservers();
}

// Ensures every worn wearable has an inventory item, then sends the
// AgentIsNowWearing message (one item id per wearable type) to the server.
void LLAgentWearables::sendAgentWearablesUpdate()
{
    // First make sure that we have inventory items for each wearable
    for (S32 type=0; type < LLWearableType::WT_COUNT; ++type)
    {
        for (U32 index=0; index < getWearableCount((LLWearableType::EType)type); ++index)
        {
            LLViewerWearable* wearable = getViewerWearable((LLWearableType::EType)type,index);
            if (wearable)
            {
                if (wearable->getItemID().isNull())
                {
                    LLPointer<LLInventoryCallback> cb =
                        new addWearableToAgentInventoryCallback(
                            LLPointer<LLRefCount>(NULL),
                            (LLWearableType::EType)type,
                            index,
                            wearable,
                            addWearableToAgentInventoryCallback::CALL_NONE);
                    addWearableToAgentInventory(cb, wearable);
                }
                else
                {
                    gInventory.addChangedMask(LLInventoryObserver::LABEL,
                                              wearable->getItemID());
                }
            }
        }
    }

    // Then make sure the inventory is in sync with the avatar.
    gInventory.notifyObservers();

    // Send the AgentIsNowWearing
    gMessageSystem->newMessageFast(_PREHASH_AgentIsNowWearing);

    gMessageSystem->nextBlockFast(_PREHASH_AgentData);
    gMessageSystem->addUUIDFast(_PREHASH_AgentID, gAgent.getID());
    gMessageSystem->addUUIDFast(_PREHASH_SessionID, gAgent.getSessionID());

    LL_DEBUGS() << "sendAgentWearablesUpdate()" << LL_ENDL;
    // MULTI-WEARABLE: DEPRECATED: HACK: index to 0- server database tables don't support concept of multiwearables.
    for (S32 type=0; type < LLWearableType::WT_COUNT; ++type)
    {
        gMessageSystem->nextBlockFast(_PREHASH_WearableData);

        U8 type_u8 = (U8)type;
        gMessageSystem->addU8Fast(_PREHASH_WearableType, type_u8);

        LLViewerWearable* wearable = getViewerWearable((LLWearableType::EType)type, 0);
        if (wearable)
        {
            //LL_INFOS() << "Sending wearable " << wearable->getName() << LL_ENDL;
            LLUUID item_id = wearable->getItemID();
            const LLViewerInventoryItem *item = gInventory.getItem(item_id);
            if (item && item->getIsLinkType())
            {
                // Get the itemID that this item points to.  i.e. make sure
                // we are storing baseitems, not their links, in the database.
                item_id = item->getLinkedUUID();
            }
            gMessageSystem->addUUIDFast(_PREHASH_ItemID, item_id);
        }
        else
        {
            //LL_INFOS() << "Not wearing wearable type " << LLWearableType::getTypeName((LLWearableType::EType)i) << LL_ENDL;
            gMessageSystem->addUUIDFast(_PREHASH_ItemID, LLUUID::null);
        }

        LL_DEBUGS() << " " << LLWearableType::getTypeLabel((LLWearableType::EType)type) << ": "
                    << (wearable ? wearable->getAssetID() : LLUUID::null) << LL_ENDL;
    }
    gAgent.sendReliableMessage();
}

// Saves one wearable slot if dirty/renamed: makes a copy with a fresh asset,
// swaps it into the slot, and updates (or creates) the backing inventory item.
void LLAgentWearables::saveWearable(const LLWearableType::EType type, const U32 index,
                                    BOOL send_update,
                                    const std::string new_name)
{
    LLViewerWearable* old_wearable = getViewerWearable(type, index);
    if(!old_wearable) return;
    bool name_changed = !new_name.empty() && (new_name != old_wearable->getName());
    if (name_changed || old_wearable->isDirty() || old_wearable->isOldVersion())
    {
        LLUUID old_item_id = old_wearable->getItemID();
        LLViewerWearable* new_wearable = LLWearableList::instance().createCopy(old_wearable);
        new_wearable->setItemID(old_item_id); // should this be in LLViewerWearable::copyDataFrom()?
        setWearable(type,index,new_wearable);

        // old_wearable may still be referred to by other inventory items. Revert
        // unsaved changes so other inventory items aren't affected by the changes
        // that were just saved.
        old_wearable->revertValues();

        LLInventoryItem* item = gInventory.getItem(old_item_id);
        if (item)
        {
            std::string item_name = item->getName();
            if (name_changed)
            {
                LL_INFOS() << "saveWearable changing name from " << item->getName() << " to " << new_name << LL_ENDL;
                item_name = new_name;
            }
            // Update existing inventory item
            LLPointer<LLViewerInventoryItem> template_item =
                new LLViewerInventoryItem(item->getUUID(),
                                          item->getParentUUID(),
                                          item->getPermissions(),
                                          new_wearable->getAssetID(),
                                          new_wearable->getAssetType(),
                                          item->getInventoryType(),
                                          item_name,
                                          item->getDescription(),
                                          item->getSaleInfo(),
                                          item->getFlags(),
                                          item->getCreationDate());
            template_item->setTransactionID(new_wearable->getTransactionID());
            template_item->updateServer(FALSE);
            gInventory.updateItem(template_item);
            if (name_changed)
            {
                gInventory.notifyObservers();
            }
        }
        else
        {
            // Add a new inventory item (shouldn't ever happen here)
            U32 todo = addWearableToAgentInventoryCallback::CALL_NONE;
            if (send_update)
            {
                todo |= addWearableToAgentInventoryCallback::CALL_UPDATE;
            }
            LLPointer<LLInventoryCallback> cb =
                new addWearableToAgentInventoryCallback(
                    LLPointer<LLRefCount>(NULL),
                    type,
                    index,
                    new_wearable,
                    todo);
            addWearableToAgentInventory(cb, new_wearable);
            return;
        }

        gAgentAvatarp->wearableUpdated( type, TRUE );

        if (send_update)
        {
            sendAgentWearablesUpdate();
        }
    }
}

// "Save As": copies a worn wearable into a new, renamed inventory item (in
// the original's folder or Lost And Found) and wears the copy when it lands.
void LLAgentWearables::saveWearableAs(const LLWearableType::EType type,
                                      const U32 index,
                                      const std::string& new_name,
                                      const std::string& description,
                                      BOOL save_in_lost_and_found)
{
    if (!isWearableCopyable(type, index))
    {
        LL_WARNS() << "LLAgent::saveWearableAs() not copyable." << LL_ENDL;
        return;
    }
    LLViewerWearable* old_wearable = getViewerWearable(type, index);
    if (!old_wearable)
    {
        LL_WARNS() << "LLAgent::saveWearableAs() no old wearable." << LL_ENDL;
        return;
    }

    LLInventoryItem* item = gInventory.getItem(getWearableItemID(type,index));
    if (!item)
    {
        LL_WARNS() << "LLAgent::saveWearableAs() no inventory item." << LL_ENDL;
        return;
    }
    std::string trunc_name(new_name);
    LLStringUtil::truncate(trunc_name, DB_INV_ITEM_NAME_STR_LEN);
    LLViewerWearable* new_wearable = LLWearableList::instance().createCopy(
        old_wearable,
        trunc_name);

    LLPointer<LLInventoryCallback> cb =
        new addWearableToAgentInventoryCallback(
            LLPointer<LLRefCount>(NULL),
            type,
            index,
            new_wearable,
            addWearableToAgentInventoryCallback::CALL_WEARITEM,
            description
            );
    LLUUID category_id;
    if (save_in_lost_and_found)
    {
        category_id = gInventory.findCategoryUUIDForType(
            LLFolderType::FT_LOST_AND_FOUND);
    }
    else
    {
        // put in same folder as original
        category_id = item->getParentUUID();
    }

    copy_inventory_item(
        gAgent.getID(),
        item->getPermissions().getOwner(),
        item->getUUID(),
        category_id,
        new_name,
        cb);

    // old_wearable may still be referred to by other inventory items. Revert
    // unsaved changes so other inventory items aren't affected by the changes
    // that were just saved.
    old_wearable->revertValues();
}

// Discards unsaved edits on one wearable slot and re-sends the appearance.
void LLAgentWearables::revertWearable(const LLWearableType::EType type, const U32 index)
{
    LLViewerWearable* wearable = getViewerWearable(type, index);
    llassert(wearable);
    if (wearable)
    {
        wearable->revertValues();
    }
    gAgent.sendAgentSetAppearance();
}

// Saves every worn wearable slot, then sends a single wearables update.
void LLAgentWearables::saveAllWearables()
{
    //if (!gInventory.isLoaded())
    //{
    //  return;
    //}

    for (S32 i=0; i < LLWearableType::WT_COUNT; i++)
    {
        for (U32 j=0; j < getWearableCount((LLWearableType::EType)i); j++)
            saveWearable((LLWearableType::EType)i, j, FALSE);
    }
    sendAgentWearablesUpdate();
}

// Called when the user changes the name of a wearable inventory item that is currently being worn.
void LLAgentWearables::setWearableName(const LLUUID& item_id, const std::string& new_name) { for (S32 i=0; i < LLWearableType::WT_COUNT; i++) { for (U32 j=0; j < getWearableCount((LLWearableType::EType)i); j++) { LLUUID curr_item_id = getWearableItemID((LLWearableType::EType)i,j); if (curr_item_id == item_id) { LLViewerWearable* old_wearable = getViewerWearable((LLWearableType::EType)i,j); llassert(old_wearable); if (!old_wearable) continue; std::string old_name = old_wearable->getName(); old_wearable->setName(new_name); LLViewerWearable* new_wearable = LLWearableList::instance().createCopy(old_wearable); new_wearable->setItemID(item_id); LLInventoryItem* item = gInventory.getItem(item_id); if (item) { new_wearable->setPermissions(item->getPermissions()); } old_wearable->setName(old_name); setWearable((LLWearableType::EType)i,j,new_wearable); sendAgentWearablesUpdate(); break; } } } } BOOL LLAgentWearables::isWearableModifiable(LLWearableType::EType type, U32 index) const { LLUUID item_id = getWearableItemID(type, index); return item_id.notNull() ? 
isWearableModifiable(item_id) : FALSE; } BOOL LLAgentWearables::isWearableModifiable(const LLUUID& item_id) const { const LLUUID& linked_id = gInventory.getLinkedItemID(item_id); if (linked_id.notNull()) { LLInventoryItem* item = gInventory.getItem(linked_id); if (item && item->getPermissions().allowModifyBy(gAgent.getID(), gAgent.getGroupID())) { return TRUE; } } return FALSE; } BOOL LLAgentWearables::isWearableCopyable(LLWearableType::EType type, U32 index) const { LLUUID item_id = getWearableItemID(type, index); if (!item_id.isNull()) { LLInventoryItem* item = gInventory.getItem(item_id); if (item && item->getPermissions().allowCopyBy(gAgent.getID(), gAgent.getGroupID())) { return TRUE; } } return FALSE; } /* U32 LLAgentWearables::getWearablePermMask(LLWearableType::EType type) { LLUUID item_id = getWearableItemID(type); if (!item_id.isNull()) { LLInventoryItem* item = gInventory.getItem(item_id); if (item) { return item->getPermissions().getMaskOwner(); } } return PERM_NONE; } */ LLInventoryItem* LLAgentWearables::getWearableInventoryItem(LLWearableType::EType type, U32 index) { LLUUID item_id = getWearableItemID(type,index); LLInventoryItem* item = NULL; if (item_id.notNull()) { item = gInventory.getItem(item_id); } return item; } const LLViewerWearable* LLAgentWearables::getWearableFromItemID(const LLUUID& item_id) const { const LLUUID& base_item_id = gInventory.getLinkedItemID(item_id); for (S32 i=0; i < LLWearableType::WT_COUNT; i++) { for (U32 j=0; j < getWearableCount((LLWearableType::EType)i); j++) { const LLViewerWearable * curr_wearable = getViewerWearable((LLWearableType::EType)i, j); if (curr_wearable && (curr_wearable->getItemID() == base_item_id)) { return curr_wearable; } } } return NULL; } LLViewerWearable* LLAgentWearables::getWearableFromItemID(const LLUUID& item_id) { const LLUUID& base_item_id = gInventory.getLinkedItemID(item_id); for (S32 i=0; i < LLWearableType::WT_COUNT; i++) { for (U32 j=0; j < getWearableCount((LLWearableType::EType)i); 
j++) { LLViewerWearable * curr_wearable = getViewerWearable((LLWearableType::EType)i, j); if (curr_wearable && (curr_wearable->getItemID() == base_item_id)) { return curr_wearable; } } } return NULL; } LLViewerWearable* LLAgentWearables::getWearableFromAssetID(const LLUUID& asset_id) { for (S32 i=0; i < LLWearableType::WT_COUNT; i++) { for (U32 j=0; j < getWearableCount((LLWearableType::EType)i); j++) { LLViewerWearable * curr_wearable = getViewerWearable((LLWearableType::EType)i, j); if (curr_wearable && (curr_wearable->getAssetID() == asset_id)) { return curr_wearable; } } } return NULL; } void LLAgentWearables::sendAgentWearablesRequest() { gMessageSystem->newMessageFast(_PREHASH_AgentWearablesRequest); gMessageSystem->nextBlockFast(_PREHASH_AgentData); gMessageSystem->addUUIDFast(_PREHASH_AgentID, gAgent.getID()); gMessageSystem->addUUIDFast(_PREHASH_SessionID, gAgent.getSessionID()); gAgent.sendReliableMessage(); } LLViewerWearable* LLAgentWearables::getViewerWearable(const LLWearableType::EType type, U32 index /*= 0*/) { return dynamic_cast<LLViewerWearable*> (getWearable(type, index)); } const LLViewerWearable* LLAgentWearables::getViewerWearable(const LLWearableType::EType type, U32 index /*= 0*/) const { return dynamic_cast<const LLViewerWearable*> (getWearable(type, index)); } // static BOOL LLAgentWearables::selfHasWearable(LLWearableType::EType type) { return (gAgentWearables.getWearableCount(type) > 0); } // virtual void LLAgentWearables::wearableUpdated(LLWearable *wearable, BOOL removed) { if (isAgentAvatarValid()) { const BOOL upload_result = removed; gAgentAvatarp->wearableUpdated(wearable->getType(), upload_result); } LLWearableData::wearableUpdated(wearable, removed); if (!removed) { LLViewerWearable* viewer_wearable = dynamic_cast<LLViewerWearable*>(wearable); viewer_wearable->refreshName(); // Hack pt 2. If the wearable we just loaded has definition version 24, // then force a re-save of this wearable after slamming the version number to 22. 
// This number was incorrectly incremented for internal builds before release, and // this fix will ensure that the affected wearables are re-saved with the right version number. // the versions themselves are compatible. This code can be removed before release. if( wearable->getDefinitionVersion() == 24 ) { wearable->setDefinitionVersion(22); U32 index = getWearableIndex(wearable); LL_INFOS() << "forcing wearable type " << wearable->getType() << " to version 22 from 24" << LL_ENDL; saveWearable(wearable->getType(),index,TRUE); } checkWearableAgainstInventory(viewer_wearable); } } BOOL LLAgentWearables::itemUpdatePending(const LLUUID& item_id) const { return mItemsAwaitingWearableUpdate.find(item_id) != mItemsAwaitingWearableUpdate.end(); } U32 LLAgentWearables::itemUpdatePendingCount() const { return mItemsAwaitingWearableUpdate.size(); } const LLUUID LLAgentWearables::getWearableItemID(LLWearableType::EType type, U32 index) const { const LLViewerWearable *wearable = getViewerWearable(type,index); if (wearable) return wearable->getItemID(); else return LLUUID(); } // [RLVa:KB] - Checked: 2011-03-31 (RLVa-1.3.0) void LLAgentWearables::getWearableItemIDs(uuid_vec_t& idItems) const { for (wearableentry_map_t::const_iterator itWearableType = mWearableDatas.begin(); itWearableType != mWearableDatas.end(); ++itWearableType) { getWearableItemIDs(itWearableType->first, idItems); } } void LLAgentWearables::getWearableItemIDs(LLWearableType::EType eType, uuid_vec_t& idItems) const { wearableentry_map_t::const_iterator itWearableType = mWearableDatas.find(eType); if (mWearableDatas.end() != itWearableType) { for (wearableentry_vec_t::const_iterator itWearable = itWearableType->second.begin(); itWearable != itWearableType->second.end(); ++itWearable) { const LLViewerWearable* pWearable = dynamic_cast<LLViewerWearable*>(*itWearable); if (pWearable) { idItems.push_back(pWearable->getItemID()); } } } } // [/RLVa:KB] const LLUUID 
LLAgentWearables::getWearableAssetID(LLWearableType::EType type, U32 index) const { const LLViewerWearable *wearable = getViewerWearable(type,index); if (wearable) return wearable->getAssetID(); else return LLUUID(); } BOOL LLAgentWearables::isWearingItem(const LLUUID& item_id) const { return getWearableFromItemID(item_id) != NULL; } // MULTI-WEARABLE: DEPRECATED (see backwards compatibility) // static // ! BACKWARDS COMPATIBILITY ! When we stop supporting viewer1.23, we can assume // that viewers have a Current Outfit Folder and won't need this message, and thus // we can remove/ignore this whole function. EXCEPT gAgentWearables.notifyLoadingStarted void LLAgentWearables::processAgentInitialWearablesUpdate(LLMessageSystem* mesgsys, void** user_data) { // We should only receive this message a single time. Ignore subsequent AgentWearablesUpdates // that may result from AgentWearablesRequest having been sent more than once. if (mInitialWearablesUpdateReceived) return; if (isAgentAvatarValid()) { gAgentAvatarp->startPhase("process_initial_wearables_update"); gAgentAvatarp->outputRezTiming("Received initial wearables update"); } // notify subscribers that wearables started loading. See EXT-7777 // *TODO: find more proper place to not be called from deprecated method. // Seems such place is found: LLInitialWearablesFetch::processContents() gAgentWearables.notifyLoadingStarted(); mInitialWearablesUpdateReceived = true; LLUUID agent_id; gMessageSystem->getUUIDFast(_PREHASH_AgentData, _PREHASH_AgentID, agent_id); if (isAgentAvatarValid() && (agent_id == gAgentAvatarp->getID())) { gMessageSystem->getU32Fast(_PREHASH_AgentData, _PREHASH_SerialNum, gAgentQueryManager.mUpdateSerialNum); const S32 NUM_BODY_PARTS = 4; S32 num_wearables = gMessageSystem->getNumberOfBlocksFast(_PREHASH_WearableData); if (num_wearables < NUM_BODY_PARTS) { // Transitional state. Avatars should always have at least their body parts (hair, eyes, shape and skin). 
// The fact that they don't have any here (only a dummy is sent) implies that either: // 1. This account existed before we had wearables // 2. The database has gotten messed up // 3. This is the account's first login (i.e. the wearables haven't been generated yet). return; } // Get the UUID of the current outfit folder (will be created if it doesn't exist) const LLUUID current_outfit_id = gInventory.findCategoryUUIDForType(LLFolderType::FT_CURRENT_OUTFIT); LLInitialWearablesFetch* outfit = new LLInitialWearablesFetch(current_outfit_id); //LL_DEBUGS() << "processAgentInitialWearablesUpdate()" << LL_ENDL; // Add wearables // MULTI-WEARABLE: DEPRECATED: Message only supports one wearable per type, will be ignored in future. gAgentWearables.mItemsAwaitingWearableUpdate.clear(); for (S32 i=0; i < num_wearables; i++) { // Parse initial wearables data from message system U8 type_u8 = 0; gMessageSystem->getU8Fast(_PREHASH_WearableData, _PREHASH_WearableType, type_u8, i); if (type_u8 >= LLWearableType::WT_COUNT) { continue; } const LLWearableType::EType type = (LLWearableType::EType) type_u8; LLUUID item_id; gMessageSystem->getUUIDFast(_PREHASH_WearableData, _PREHASH_ItemID, item_id, i); LLUUID asset_id; gMessageSystem->getUUIDFast(_PREHASH_WearableData, _PREHASH_AssetID, asset_id, i); if (asset_id.isNull()) { LLViewerWearable::removeFromAvatar(type, FALSE); } else { LLAssetType::EType asset_type = LLWearableType::getAssetType(type); if (asset_type == LLAssetType::AT_NONE) { continue; } // MULTI-WEARABLE: DEPRECATED: this message only supports one wearable per type. Should be ignored in future versions // Store initial wearables data until we know whether we have the current outfit folder or need to use the data. 
				LLInitialWearablesFetch::InitialWearableData wearable_data(type, item_id, asset_id);
				outfit->add(wearable_data);
			}

			LL_DEBUGS() << " " << LLWearableType::getTypeLabel(type) << LL_ENDL;
		}

		// Get the complete information on the items in the inventory and set up an observer
		// that will trigger when the complete information is fetched.
		outfit->startFetch();
		if(outfit->isFinished())
		{
			// everything is already here - call done.
			outfit->done();
		}
		else
		{
			// it's all on it's way - add an observer, and the inventory
			// will call done for us when everything is here.
			gInventory.addObserver(outfit);
		}
	}
}

// Normally, all wearables referred to "AgentWearablesUpdate" will correspond to actual assets in the
// database. If for some reason, we can't load one of those assets, we can try to reconstruct it so that
// the user isn't left without a shape, for example. (We can do that only after the inventory has loaded.)
void LLAgentWearables::recoverMissingWearable(const LLWearableType::EType type, U32 index)
{
	// Try to recover by replacing missing wearable with a new one.
	LLNotificationsUtil::add("ReplacedMissingWearable");
	LL_DEBUGS() << "Wearable " << LLWearableType::getTypeLabel(type)
			<< " could not be downloaded. Replaced inventory item with default wearable." << LL_ENDL;
	LLViewerWearable* new_wearable = LLWearableList::instance().createNewWearable(type, gAgentAvatarp);

	setWearable(type,index,new_wearable);
	//new_wearable->writeToAvatar(TRUE);

	// Add a new one in the lost and found folder.
	// (We used to overwrite the "not found" one, but that could potentially
	// destroy content.)
	JC // <- stray author initials: tail of the "...could potentially destroy content.)" comment cut at the chunk boundary above
	const LLUUID lost_and_found_id = gInventory.findCategoryUUIDForType(LLFolderType::FT_LOST_AND_FOUND);
	// Callback fires CALL_RECOVERDONE (-> recoverMissingWearableDone) once the
	// replacement wearable has been added to inventory.
	LLPointer<LLInventoryCallback> cb =
		new addWearableToAgentInventoryCallback(
			LLPointer<LLRefCount>(NULL),
			type,
			index,
			new_wearable,
			addWearableToAgentInventoryCallback::CALL_RECOVERDONE);
	addWearableToAgentInventory(cb, new_wearable, lost_and_found_id, TRUE);
}

// Completion hook for recoverMissingWearable(): re-checks loaded state and
// either pushes the corrected appearance to the server or pokes inventory
// observers so the UI refreshes.
void LLAgentWearables::recoverMissingWearableDone()
{
	// Have all the wearables that the avatar was wearing at log-in arrived or been fabricated?
	updateWearablesLoaded();
	if (areWearablesLoaded())
	{
		// Make sure that the server's idea of the avatar's wearables actually match the wearables.
		gAgent.sendAgentSetAppearance();
	}
	else
	{
		gInventory.addChangedMask(LLInventoryObserver::LABEL, LLUUID::null);
		gInventory.notifyObservers();
	}
}

// Attaches a default-constructed local texture object for texture_type to the
// wearable worn at (wearable_type, wearable_index); LL_ERRS (fatal log) if the
// wearable does not exist.
void LLAgentWearables::addLocalTextureObject(const LLWearableType::EType wearable_type, const LLAvatarAppearanceDefines::ETextureIndex texture_type, U32 wearable_index)
{
	LLViewerWearable* wearable = getViewerWearable((LLWearableType::EType)wearable_type, wearable_index);
	if (!wearable)
	{
		LL_ERRS() << "Tried to add local texture object to invalid wearable with type " << wearable_type << " and index " << wearable_index << LL_ENDL;
		return;
	}
	LLLocalTextureObject lto;
	wearable->setLocalTextureObject(texture_type, lto);
}

// Inventory callback used by createStandardWearables(): fire() runs once per
// created item, and the destructor (i.e. when the last reference is dropped
// after all creates completed) links every collected item into the COF.
class OnWearableItemCreatedCB: public LLInventoryCallback
{
public:
	OnWearableItemCreatedCB():
		mWearablesAwaitingItems(LLWearableType::WT_COUNT,NULL)
	{
		LL_INFOS() << "created callback" << LL_ENDL;
	}
	/* virtual */ void fire(const LLUUID& inv_item)
	{
		LL_INFOS() << "One item created " << inv_item.asString() << LL_ENDL;
		LLViewerInventoryItem *item = gInventory.getItem(inv_item);
		mItemsToLink.push_back(item);
		updatePendingWearable(inv_item);
	}
	~OnWearableItemCreatedCB()
	{
		LL_INFOS() << "All items created" << LL_ENDL;
		LLPointer<LLInventoryCallback> link_waiter = new LLUpdateAppearanceOnDestroy;
		LLAppearanceMgr::instance().linkAll(LLAppearanceMgr::instance().getCOF(),
			mItemsToLink,
			link_waiter);
	}

	// Remembers a freshly created wearable so updatePendingWearable() can bind
	// it to its inventory item id once the item exists.
	void addPendingWearable(LLViewerWearable *wearable)
	{
		if (!wearable)
		{
			LL_WARNS() << "no wearable" << LL_ENDL;
			return;
		}
		LLWearableType::EType type = wearable->getType();
		if (type<LLWearableType::WT_COUNT)
		{
			mWearablesAwaitingItems[type] = wearable;
		}
		else
		{
			LL_WARNS() << "invalid type " << type << LL_ENDL;
		}
	}

	// Binds the pending wearable of the item's type to the created item's id.
	void updatePendingWearable(const LLUUID& inv_item)
	{
		LLViewerInventoryItem *item = gInventory.getItem(inv_item);
		if (!item)
		{
			LL_WARNS() << "no item found" << LL_ENDL;
			return;
		}
		if (!item->isWearableType())
		{
			LL_WARNS() << "non-wearable item found" << LL_ENDL;
			return;
		}
		// NOTE(review): this condition is always true here - it duplicates the
		// two guard clauses above. Left as-is.
		if (item && item->isWearableType())
		{
			LLWearableType::EType type = item->getWearableType();
			if (type < LLWearableType::WT_COUNT)
			{
				LLViewerWearable *wearable = mWearablesAwaitingItems[type];
				if (wearable)
					wearable->setItemID(inv_item);
			}
			else
			{
				LL_WARNS() << "invalid wearable type " << type << LL_ENDL;
			}
		}
	}

private:
	LLInventoryModel::item_array_t mItemsToLink;             // items created so far; linked into COF on destruction
	std::vector<LLViewerWearable*> mWearablesAwaitingItems;  // one slot per wearable type, NULL when none pending
};

// Creates the default set of body parts and clothing for the agent's avatar
// and files each created item into inventory via OnWearableItemCreatedCB.
void LLAgentWearables::createStandardWearables()
{
	LL_WARNS() << "Creating standard wearables" << LL_ENDL;

	if (!isAgentAvatarValid()) return;

	// Per-type flags: which wearable types get a default instance created.
	const BOOL create[LLWearableType::WT_COUNT] =
	{
		TRUE,  //LLWearableType::WT_SHAPE
		TRUE,  //LLWearableType::WT_SKIN
		TRUE,  //LLWearableType::WT_HAIR
		TRUE,  //LLWearableType::WT_EYES
		TRUE,  //LLWearableType::WT_SHIRT
		TRUE,  //LLWearableType::WT_PANTS
		TRUE,  //LLWearableType::WT_SHOES
		TRUE,  //LLWearableType::WT_SOCKS
		FALSE, //LLWearableType::WT_JACKET
		FALSE, //LLWearableType::WT_GLOVES
		TRUE,  //LLWearableType::WT_UNDERSHIRT
		TRUE,  //LLWearableType::WT_UNDERPANTS
		FALSE  //LLWearableType::WT_SKIRT
	};

	LLPointer<LLInventoryCallback> cb = new OnWearableItemCreatedCB;
	for (S32 i=0; i < LLWearableType::WT_COUNT; i++)
	{
		if (create[i])
		{
			// Standard wearables are only created for a brand-new avatar, so
			// nothing of this type should be worn yet.
			llassert(getWearableCount((LLWearableType::EType)i) == 0);
			LLViewerWearable* wearable = LLWearableList::instance().createNewWearable((LLWearableType::EType)i, gAgentAvatarp);
			// Register the wearable with the callback before the inventory
			// item exists; fire() will bind the item id later.
			((OnWearableItemCreatedCB*)(&(*cb)))->addPendingWearable(wearable);
			// no need to update here...
			LLUUID category_id = LLUUID::null;
			create_inventory_item(gAgent.getID(),
					gAgent.getSessionID(),
					category_id,
					wearable->getTransactionID(),
					wearable->getName(),
					wearable->getDescription(),
					wearable->getAssetType(),
					LLInventoryType::IT_WEARABLE,
					wearable->getType(),
					wearable->getPermissions().getMaskNextOwner(),
					cb);
		}
	}
}

// Per-wearable completion hook for standard-wearable creation: refreshes the
// avatar's visual params.
void LLAgentWearables::createStandardWearablesDone(S32 type, U32 index)
{
	LL_INFOS() << "type " << type << " index " << index << LL_ENDL;

	if (!isAgentAvatarValid()) return;
	gAgentAvatarp->updateVisualParams();
}

// Called when every standard wearable has been created: marks wearables as
// loaded, notifies listeners, and pushes the state to the server.
void LLAgentWearables::createStandardWearablesAllDone()
{
	// ... because sendAgentWearablesUpdate will notify inventory
	// observers.
	LL_INFOS() << "all done?" << LL_ENDL;

	mWearablesLoaded = TRUE;
	checkWearablesLoaded();
	notifyLoadingFinished();

	updateServer();

	// Treat this as the first texture entry message, if none received yet
	gAgentAvatarp->onFirstTEMessageReceived();
}

// After a new outfit is made, selects the first added item in the inventory UI.
void LLAgentWearables::makeNewOutfitDone(S32 type, U32 index)
{
	LLUUID first_item_id = getWearableItemID((LLWearableType::EType)type, index);
	// Open the inventory and select the first item we added.
	if (first_item_id.notNull())
	{
		LLInventoryPanel *active_panel = LLInventoryPanel::getActiveInventoryPanel();
		if (active_panel)
		{
			active_panel->setSelection(first_item_id, TAKE_FOCUS_NO);
		}
	}
}

// Creates the inventory item backing an existing wearable in category_id;
// cb fires when the item has been created.
void LLAgentWearables::addWearableToAgentInventory(LLPointer<LLInventoryCallback> cb,
		LLViewerWearable* wearable,
		const LLUUID& category_id,
		BOOL notify)
{
	create_inventory_item(gAgent.getID(),
			gAgent.getSessionID(),
			category_id,
			wearable->getTransactionID(),
			wearable->getName(),
			wearable->getDescription(),
			wearable->getAssetType(),
			LLInventoryType::IT_WEARABLE,
			wearable->getType(),
			wearable->getPermissions().getMaskNextOwner(),
			cb);
}

// Removes one wearable at (type, index), or all of the type when
// do_remove_all is set; prompts to save first when the wearable is dirty.
void LLAgentWearables::removeWearable(const LLWearableType::EType type, bool do_remove_all, U32 index)
{
	if (gAgent.isTeen() &&
		(type == LLWearableType::WT_UNDERSHIRT || type == LLWearableType::WT_UNDERPANTS))
	{
		// Can't take off underclothing in simple UI mode or on PG accounts
		// TODO: enable the removing of a single undershirt/underpants if multiple are worn. - Nyx
		return;
	}
	if (getWearableCount(type) == 0)
	{
		// no wearables to remove
		return;
	}

	if (do_remove_all)
	{
		removeWearableFinal(type, do_remove_all, index);
	}
	else
	{
		LLViewerWearable* old_wearable = getViewerWearable(type,index);

		if (old_wearable)
		{
			if (old_wearable->isDirty())
			{
				LLSD payload;
				payload["wearable_type"] = (S32)type;
				payload["wearable_index"] = (S32)index;
				// Bring up view-modal dialog: Save changes?
				Yes, No, Cancel // <- stray tail of the "Save changes?" comment cut at the chunk boundary above
				LLNotificationsUtil::add("WearableSave", LLSD(), payload, &LLAgentWearables::onRemoveWearableDialog);
				return;
			}
			else
			{
				removeWearableFinal(type, do_remove_all, index);
			}
		}
	}
}

// static
// Dialog callback for the "WearableSave" notification raised by removeWearable().
bool LLAgentWearables::onRemoveWearableDialog(const LLSD& notification, const LLSD& response)
{
	S32 option = LLNotificationsUtil::getSelectedOption(notification, response);
	LLWearableType::EType type = (LLWearableType::EType)notification["payload"]["wearable_type"].asInteger();
	S32 index = (S32)notification["payload"]["wearable_index"].asInteger();
	switch(option)
	{
		case 0:  // "Save"
			gAgentWearables.saveWearable(type, index);
			gAgentWearables.removeWearableFinal(type, false, index);
			break;

		case 1:  // "Don't Save"
			gAgentWearables.removeWearableFinal(type, false, index);
			break;

		case 2: // "Cancel"
			break;

		default:
			llassert(0);
			break;
	}
	return false;
}

// Called by removeWearable() and onRemoveWearableDialog() to actually do the removal.
void LLAgentWearables::removeWearableFinal(const LLWearableType::EType type, bool do_remove_all, U32 index)
{
	//LLAgentDumper dumper("removeWearable");
	if (do_remove_all)
	{
		// Pop from the top down so indices stay valid while removing.
		S32 max_entry = getWearableCount(type)-1;
		for (S32 i=max_entry; i>=0; i--)
		{
			LLViewerWearable* old_wearable = getViewerWearable(type,i);
			//queryWearableCache(); // moved below
			if (old_wearable)
			{
				popWearable(old_wearable);
				old_wearable->removeFromAvatar(TRUE);
			}
		}
//		clearWearableType(type);
// [RLVa:KB] - Checked: 2010-05-14 (RLVa-1.2.0)
		// The line above shouldn't be needed
		RLV_VERIFY(0 == getWearableCount(type));
// [/RLVa:KB]
	}
	else
	{
		LLViewerWearable* old_wearable = getViewerWearable(type, index);
		//queryWearableCache(); // moved below
		if (old_wearable)
		{
			popWearable(old_wearable);
			old_wearable->removeFromAvatar(TRUE);
		}
	}

	queryWearableCache();

	// Update the server
	updateServer();
	gInventory.notifyObservers();
}

// Assumes existing wearables are not dirty.
// Wears a whole outfit: when `remove` is set, first strips all clothing (not
// body parts), then wears each entry of `wearables`, pairing it with the
// matching inventory item in `items` (the two arrays are parallel).
void LLAgentWearables::setWearableOutfit(const LLInventoryItem::item_array_t& items,
		const std::vector< LLViewerWearable* >& wearables,
		BOOL remove)
{
	LL_INFOS() << "setWearableOutfit() start" << LL_ENDL;

	// TODO: Removed check for ensuring that teens don't remove undershirt and underwear. Handle later
	if (remove)
	{
		// note: shirt is the first non-body part wearable item. Update if wearable order changes.
		// This loop should remove all clothing, but not any body parts
		for (S32 type = 0; type < (S32)LLWearableType::WT_COUNT; type++)
		{
			if (LLWearableType::getAssetType((LLWearableType::EType)type) == LLAssetType::AT_CLOTHING)
			{
				removeWearable((LLWearableType::EType)type, true, 0);
			}
		}
	}

	S32 count = wearables.size();
	llassert(items.size() == count);

	S32 i;
	for (i = 0; i < count; i++)
	{
		LLViewerWearable* new_wearable = wearables[i];
		LLPointer<LLInventoryItem> new_item = items[i];

		llassert(new_wearable);
		if (new_wearable)
		{
			const LLWearableType::EType type = new_wearable->getType();

			new_wearable->setName(new_item->getName());
			new_wearable->setItemID(new_item->getUUID());

			if (LLWearableType::getAssetType(type) == LLAssetType::AT_BODYPART)
			{
				// exactly one wearable per body part
				setWearable(type,0,new_wearable);
			}
			else
			{
				// clothing may be layered - append on top
				pushWearable(type,new_wearable);
			}
			const BOOL removed = FALSE;
			wearableUpdated(new_wearable, removed);
		}
	}

	gInventory.notifyObservers();

	if (isAgentAvatarValid())
	{
		gAgentAvatarp->setCompositeUpdatesEnabled(TRUE);
		gAgentAvatarp->updateVisualParams();

		// If we have not yet declouded, we may want to use
		// baked texture UUIDs sent from the first objectUpdate message
		// don't overwrite these. If we have already declouded, we've saved
		// these ids as the last known good textures and can invalidate without
		// re-clouding.
		if (!gAgentAvatarp->getIsCloud())
		{
			gAgentAvatarp->invalidateAll();
		}
	}

	// Start rendering & update the server
	mWearablesLoaded = TRUE;
	checkWearablesLoaded();
// [SL:KB] - Patch: Appearance-InitialWearablesLoadedCallback | Checked: 2010-09-22 (Catznip-2.2)
	// Fire the one-shot "initial wearables loaded" signal the first time through.
	if (!mInitialWearablesLoaded)
	{
		mInitialWearablesLoaded = true;
		mInitialWearablesLoadedSignal();
	}
// [/SL:KB]
	notifyLoadingFinished();
	queryWearableCache();
	updateServer();

	gAgentAvatarp->dumpAvatarTEs("setWearableOutfit");

	LL_DEBUGS() << "setWearableOutfit() end" << LL_ENDL;
}

//// User has picked "wear on avatar" from a menu.
//void LLAgentWearables::setWearableItem(LLInventoryItem* new_item, LLViewerWearable* new_wearable, bool do_append)
//{
//	//LLAgentDumper dumper("setWearableItem");
//	if (isWearingItem(new_item->getUUID()))
//	{
//		LL_WARNS() << "wearable " << new_item->getUUID() << " is already worn" << LL_ENDL;
//		return;
//	}
//
//	const LLWearableType::EType type = new_wearable->getType();
//
//	if (!do_append)
//	{
//		// Remove old wearable, if any
//		// MULTI_WEARABLE: hardwired to 0
//		LLViewerWearable* old_wearable = getViewerWearable(type,0);
//		if (old_wearable)
//		{
//			const LLUUID& old_item_id = old_wearable->getItemID();
//			if ((old_wearable->getAssetID() == new_wearable->getAssetID()) &&
//				(old_item_id == new_item->getUUID()))
//			{
//				LL_DEBUGS() << "No change to wearable asset and item: " << LLWearableType::getTypeName(type) << LL_ENDL;
//				return;
//			}
//
//			if (old_wearable->isDirty())
//			{
//				// Bring up modal dialog: Save changes?
Yes, No, Cancel // <- stray tail of the commented-out "Save changes?" comment cut at the chunk boundary above
//				LLSD payload;
//				payload["item_id"] = new_item->getUUID();
//				LLNotificationsUtil::add("WearableSave", LLSD(), payload, boost::bind(onSetWearableDialog, _1, _2, new_wearable));
//				return;
//			}
//		}
//	}
//
//	setWearableFinal(new_item, new_wearable, do_append);
//}

// static
// Dialog callback for the "WearableSave" notification raised when replacing a
// dirty wearable; takes ownership of `wearable` and deletes it on every path.
bool LLAgentWearables::onSetWearableDialog(const LLSD& notification, const LLSD& response, LLViewerWearable* wearable)
{
	S32 option = LLNotificationsUtil::getSelectedOption(notification, response);
	LLInventoryItem* new_item = gInventory.getItem(notification["payload"]["item_id"].asUUID());
	U32 index = gAgentWearables.getWearableIndex(wearable);
	if (!new_item)
	{
		delete wearable;
		return false;
	}

	switch(option)
	{
		case 0:  // "Save"
			gAgentWearables.saveWearable(wearable->getType(),index);
			gAgentWearables.setWearableFinal(new_item, wearable);
			break;

		case 1:  // "Don't Save"
			gAgentWearables.setWearableFinal(new_item, wearable);
			break;

		case 2: // "Cancel"
			break;

		default:
			llassert(0);
			break;
	}

	delete wearable;
	return false;
}

// Called from setWearableItem() and onSetWearableDialog() to actually set the wearable.
// MULTI_WEARABLE: unify code after null objects are gone.
void LLAgentWearables::setWearableFinal(LLInventoryItem* new_item, LLViewerWearable* new_wearable, bool do_append)
{
	const LLWearableType::EType type = new_wearable->getType();

	if (do_append && getWearableItemID(type,0).notNull())
	{
		// Layer the new wearable on top of the existing ones of this type.
		new_wearable->setItemID(new_item->getUUID());
		const bool trigger_updated = false;
		pushWearable(type, new_wearable, trigger_updated);
		LL_INFOS() << "Added additional wearable for type " << type
				<< " size is now " << getWearableCount(type) << LL_ENDL;
		checkWearableAgainstInventory(new_wearable);
	}
	else
	{
		// Replace the old wearable with a new one.
		llassert(new_item->getAssetUUID() == new_wearable->getAssetID());

		LLViewerWearable *old_wearable = getViewerWearable(type,0);
		LLUUID old_item_id;
		if (old_wearable)
		{
			old_item_id = old_wearable->getItemID();
		}
		new_wearable->setItemID(new_item->getUUID());
		setWearable(type,0,new_wearable);

		// Label the replaced item's inventory entry as changed so UI refreshes.
		if (old_item_id.notNull())
		{
			gInventory.addChangedMask(LLInventoryObserver::LABEL, old_item_id);
			gInventory.notifyObservers();
		}
		LL_INFOS() << "Replaced current element 0 for type " << type
				<< " size is now " << getWearableCount(type) << LL_ENDL;
	}

	//LL_INFOS() << "LLVOAvatar::setWearableItem()" << LL_ENDL;
	queryWearableCache();

	//new_wearable->writeToAvatar(TRUE);

	updateServer();
}

// Sends an AgentCachedTexture query to the region asking which baked textures
// are already cached server-side for our current wearable combination.
// Skipped entirely when server-side baking is in effect.
void LLAgentWearables::queryWearableCache()
{
	if (!areWearablesLoaded() || (gAgent.getRegion() && gAgent.getRegion()->getCentralBakeVersion()))
	{
		return;
	}

	// Look up affected baked textures.
	// If they exist:
	//		disallow updates for affected layersets (until dataserver responds with cache request.)
	//		If cache miss, turn updates back on and invalidate composite.
	//		If cache hit, modify baked texture entries.
	//
	// Cache requests contain list of hashes for each baked texture entry.
	// Response is list of valid baked texture assets.
	(same message) // <- stray tail of the "Response is list of..." comment cut at the chunk boundary above
	gMessageSystem->newMessageFast(_PREHASH_AgentCachedTexture);
	gMessageSystem->nextBlockFast(_PREHASH_AgentData);
	gMessageSystem->addUUIDFast(_PREHASH_AgentID, gAgent.getID());
	gMessageSystem->addUUIDFast(_PREHASH_SessionID, gAgent.getSessionID());
	gMessageSystem->addS32Fast(_PREHASH_SerialNum, gAgentQueryManager.mWearablesCacheQueryID);

	S32 num_queries = 0;
	for (U8 baked_index = 0; baked_index < BAKED_NUM_INDICES; baked_index++)
	{
		LLUUID hash_id = computeBakedTextureHash((EBakedTextureIndex) baked_index);
		if (hash_id.notNull())
		{
			num_queries++;
			// *NOTE: make sure at least one request gets packed
			ETextureIndex te_index = LLAvatarAppearanceDictionary::bakedToLocalTextureIndex((EBakedTextureIndex)baked_index);

			//LL_INFOS() << "Requesting texture for hash " << hash << " in baked texture slot " << baked_index << LL_ENDL;
			gMessageSystem->nextBlockFast(_PREHASH_WearableData);
			gMessageSystem->addUUIDFast(_PREHASH_ID, hash_id);
			gMessageSystem->addU8Fast(_PREHASH_TextureIndex, (U8)te_index);
		}

		// NOTE(review): this assignment runs for every baked index, even when
		// no query block was packed for it above - confirm that is intended.
		gAgentQueryManager.mActiveCacheQueries[baked_index] = gAgentQueryManager.mWearablesCacheQueryID;
	}

	//VWR-22113: gAgent.getRegion() can return null if invalid, seen here on logout
	if(gAgent.getRegion())
	{
		if (isAgentAvatarValid())
		{
			selfStartPhase("fetch_texture_cache_entries");
			gAgentAvatarp->outputRezTiming("Fetching textures from cache");
		}

		LL_DEBUGS("Avatar") << gAgentAvatarp->avString() << "Requesting texture cache entry for " << num_queries << " baked textures" << LL_ENDL;
		gMessageSystem->sendReliable(gAgent.getRegion()->getHost());
		gAgentQueryManager.mNumPendingQueries++;
		gAgentQueryManager.mWearablesCacheQueryID++;
	}
}

// virtual
// Stirs avatar-id bytes into the hash so it no longer matches any cached bake.
void LLAgentWearables::invalidateBakedTextureHash(LLMD5& hash) const
{
	// Add some garbage into the hash so that it becomes invalid.
	if (isAgentAvatarValid())
	{
		hash.update((const unsigned char*)gAgentAvatarp->getID().mData, UUID_BYTES);
	}
}

// User has picked "remove from avatar" from a menu.
// static //void LLAgentWearables::userRemoveWearable(const LLWearableType::EType &type, const U32 &index) //{ // if (!(type==LLWearableType::WT_SHAPE || type==LLWearableType::WT_SKIN || type==LLWearableType::WT_HAIR || type==LLWearableType::WT_EYES)) //&& // //!((!gAgent.isTeen()) && (type==LLWearableType::WT_UNDERPANTS || type==LLWearableType::WT_UNDERSHIRT))) // { // gAgentWearables.removeWearable(type,false,index); // } //} //static //void LLAgentWearables::userRemoveWearablesOfType(const LLWearableType::EType &type) //{ // if (!(type==LLWearableType::WT_SHAPE || type==LLWearableType::WT_SKIN || type==LLWearableType::WT_HAIR || type==LLWearableType::WT_EYES)) //&& // //!((!gAgent.isTeen()) && (type==LLWearableType::WT_UNDERPANTS || type==LLWearableType::WT_UNDERSHIRT))) // { // gAgentWearables.removeWearable(type,true,0); // } //} // Combines userRemoveMulipleAttachments() and userAttachMultipleAttachments() logic to // get attachments into desired state with minimal number of adds/removes. //void LLAgentWearables::userUpdateAttachments(LLInventoryModel::item_array_t& obj_item_array) // [SL:KB] - Patch: Appearance-SyncAttach | Checked: 2010-09-22 (Catznip-2.2) void LLAgentWearables::userUpdateAttachments(LLInventoryModel::item_array_t& obj_item_array, bool attach_only) // [/SL:KB] { // Possible cases: // already wearing but not in request set -> take off. // already wearing and in request set -> leave alone. // not wearing and in request set -> put on. if (!isAgentAvatarValid()) return; std::set<LLUUID> requested_item_ids; std::set<LLUUID> current_item_ids; for (S32 i=0; i<obj_item_array.size(); i++) { const LLUUID & requested_id = obj_item_array[i].get()->getLinkedUUID(); //LL_INFOS() << "Requested attachment id " << requested_id << LL_ENDL; requested_item_ids.insert(requested_id); } // Build up list of objects to be removed and items currently attached. 
	llvo_vec_t objects_to_remove;
	// Walk every attachment point and classify each attached object as
	// keep (in the request set, or a temp attachment we should keep) or remove.
	for (LLVOAvatar::attachment_map_t::iterator iter = gAgentAvatarp->mAttachmentPoints.begin();
			iter != gAgentAvatarp->mAttachmentPoints.end();)
	{
		LLVOAvatar::attachment_map_t::iterator curiter = iter++;
		LLViewerJointAttachment* attachment = curiter->second;
		for (LLViewerJointAttachment::attachedobjs_vec_t::iterator attachment_iter = attachment->mAttachedObjects.begin();
				attachment_iter != attachment->mAttachedObjects.end();
				++attachment_iter)
		{
			LLViewerObject *objectp = (*attachment_iter);
			if (objectp)
			{
				LLUUID object_item_id = objectp->getAttachmentItemID();

				bool remove_attachment = true;
				if (requested_item_ids.find(object_item_id) != requested_item_ids.end())
				{	// Object currently worn, was requested to keep it
					// Flag as currently worn so we won't have to add it again.
					remove_attachment = false;
				}
				else if (objectp->isTempAttachment())
				{	// Check if we should keep this temp attachment
					remove_attachment = LLAppearanceMgr::instance().shouldRemoveTempAttachment(objectp->getID());
				}

				if (remove_attachment)
				{
					// LL_INFOS() << "found object to remove, id " << objectp->getID() << ", item " << objectp->getAttachmentItemID() << LL_ENDL;
					objects_to_remove.push_back(objectp);
				}
				else
				{
					// LL_INFOS() << "found object to keep, id " << objectp->getID() << ", item " << objectp->getAttachmentItemID() << LL_ENDL;
					current_item_ids.insert(object_item_id);
				}
			}
		}
	}

	// Anything requested that is not already worn needs to be attached.
	LLInventoryModel::item_array_t items_to_add;
	for (LLInventoryModel::item_array_t::iterator it = obj_item_array.begin();
			it != obj_item_array.end();
			++it)
	{
		LLUUID linked_id = (*it).get()->getLinkedUUID();
		if (current_item_ids.find(linked_id) != current_item_ids.end())
		{
			// Requested attachment is already worn.
		}
		else
		{
			// Requested attachment is not worn yet.
			items_to_add.push_back(*it);
		}
	}
	// S32 remove_count = objects_to_remove.size();
	// S32 add_count = items_to_add.size();
	// LL_INFOS() << "remove " << remove_count << " add " << add_count << LL_ENDL;

	// Remove everything in objects_to_remove
//	userRemoveMultipleAttachments(objects_to_remove);
// [SL:KB] - Patch: Appearance-SyncAttach | Checked: 2010-09-22 (Catznip-2.2)
	if (!attach_only)
	{
		userRemoveMultipleAttachments(objects_to_remove);
	}
// [/SL:KB]

	// Add everything in items_to_add
	userAttachMultipleAttachments(items_to_add);
}

// Detaches every object in the list with a single ObjectDetach message,
// after pruning any attachment that RLVa locks forbid removing.
void LLAgentWearables::userRemoveMultipleAttachments(llvo_vec_t& objects_to_remove)
{
	if (!isAgentAvatarValid()) return;

// [RLVa:KB] - Checked: 2010-03-04 (RLVa-1.2.0)
	// RELEASE-RLVa: [SL-3.4] Check our callers and verify that erasing elements from the passed vector won't break random things
	if ( (rlv_handler_t::isEnabled()) && (gRlvAttachmentLocks.hasLockedAttachmentPoint(RLV_LOCK_REMOVE)) )
	{
		llvo_vec_t::iterator itObj = objects_to_remove.begin();
		while (objects_to_remove.end() != itObj)
		{
			const LLViewerObject* pAttachObj = *itObj;
			if (gRlvAttachmentLocks.isLockedAttachment(pAttachObj))
			{
				// Locked: keep it attached by dropping it from the remove list.
				itObj = objects_to_remove.erase(itObj);

				// Fall-back code: re-add the attachment if it got removed from COF somehow (compensates for possible bugs elsewhere)
				bool fInCOF = LLAppearanceMgr::isLinkInCOF(pAttachObj->getAttachmentItemID());
				RLV_ASSERT(fInCOF);
				if (!fInCOF)
				{
					LLAppearanceMgr::instance().registerAttachment(pAttachObj->getAttachmentItemID());
				}
			}
			else
			{
				++itObj;
			}
		}
	}
// [/RLVa:KB]

	if (objects_to_remove.empty())
		return;

	gMessageSystem->newMessage("ObjectDetach");
	gMessageSystem->nextBlockFast(_PREHASH_AgentData);
	gMessageSystem->addUUIDFast(_PREHASH_AgentID, gAgent.getID());
	gMessageSystem->addUUIDFast(_PREHASH_SessionID, gAgent.getSessionID());

	// One ObjectData block per object being detached.
	for (llvo_vec_t::iterator it = objects_to_remove.begin();
			it != objects_to_remove.end();
			++it)
	{
		LLViewerObject *objectp = *it;
		gMessageSystem->nextBlockFast(_PREHASH_ObjectData);
		gMessageSystem->addU32Fast(_PREHASH_ObjectLocalID, objectp->getLocalID());
	}
	gMessageSystem->sendReliable(gAgent.getRegionHost());
}

// Rezzes/attaches every inventory item in the list via one or more
// RezMultipleAttachmentsFromInv compound messages (subject to RLVa locks).
void LLAgentWearables::userAttachMultipleAttachments(LLInventoryModel::item_array_t& obj_item_array)
{
// [RLVa:KB] - Checked: 2011-05-22 (RLVa-1.3.1)
	// false only for the very first (login-time) attachment request, during
	// which RLVa pruning/watchdog handling is skipped.
	static bool sInitialAttachmentsRequested = false;

	// RELEASE-RLVa: [SL-3.4] Check our callers and verify that erasing elements from the passed vector won't break random things
	if ( (rlv_handler_t::isEnabled()) && (sInitialAttachmentsRequested) && (gRlvAttachmentLocks.hasLockedAttachmentPoint(RLV_LOCK_ANY)) )
	{
		// Fall-back code: everything should really already have been pruned before we get this far
		// Iterate backwards so erase() doesn't invalidate the remaining indices.
		for (S32 idxItem = obj_item_array.size() - 1; idxItem >= 0; idxItem--)
		{
			const LLInventoryItem* pItem = obj_item_array.at(idxItem).get();
			if (!gRlvAttachmentLocks.canAttach(pItem))
			{
				obj_item_array.erase( obj_item_array.begin()+idxItem );
				RLV_ASSERT(false);
			}
		}
	}
// [/RLVa:KB]

	// Build a compound message to send all the objects that need to be rezzed.
	S32 obj_count = obj_item_array.size();

	// Limit number of packets to send
	const S32 MAX_PACKETS_TO_SEND = 10;
	const S32 OBJECTS_PER_PACKET = 4;
	const S32 MAX_OBJECTS_TO_SEND = MAX_PACKETS_TO_SEND * OBJECTS_PER_PACKET;
	if( obj_count > MAX_OBJECTS_TO_SEND )
	{
		// Anything past the cap is silently dropped.
		obj_count = MAX_OBJECTS_TO_SEND;
	}

	// Create an id to keep the parts of the compound message together
	LLUUID compound_msg_id;
	compound_msg_id.generate();
	LLMessageSystem* msg = gMessageSystem;

	for(S32 i = 0; i < obj_count; ++i)
	{
		if( 0 == (i % OBJECTS_PER_PACKET) )
		{
			// Start a new message chunk
			msg->newMessageFast(_PREHASH_RezMultipleAttachmentsFromInv);
			msg->nextBlockFast(_PREHASH_AgentData);
			msg->addUUIDFast(_PREHASH_AgentID, gAgent.getID());
			msg->addUUIDFast(_PREHASH_SessionID, gAgent.getSessionID());
			msg->nextBlockFast(_PREHASH_HeaderData);
			msg->addUUIDFast(_PREHASH_CompoundMsgID, compound_msg_id );
			msg->addU8Fast(_PREHASH_TotalObjects, obj_count );
			msg->addBOOLFast(_PREHASH_FirstDetachAll, false );
		}

		const LLInventoryItem* item = obj_item_array.at(i).get();
		msg->nextBlockFast(_PREHASH_ObjectData );
		msg->addUUIDFast(_PREHASH_ItemID, item->getLinkedUUID());
		msg->addUUIDFast(_PREHASH_OwnerID, item->getPermissions().getOwner());
		msg->addU8Fast(_PREHASH_AttachmentPt, 0 | ATTACHMENT_ADD);	// Wear at the previous or default attachment point
// [RLVa:KB] - Checked: 2011-05-22 (RLVa-1.3.1)
		if ( (rlv_handler_t::isEnabled()) && (sInitialAttachmentsRequested) && (gRlvAttachmentLocks.hasLockedAttachmentPoint(RLV_LOCK_ANY)) )
		{
			RlvAttachmentLockWatchdog::instance().onWearAttachment(item, RLV_WEAR_ADD);
		}
// [/RLVa:KB]
		pack_permissions_slam(msg, item->getFlags(), item->getPermissions());
		msg->addStringFast(_PREHASH_Name, item->getName());
		msg->addStringFast(_PREHASH_Description, item->getDescription());

		if( (i+1 == obj_count) || ((OBJECTS_PER_PACKET-1) == (i % OBJECTS_PER_PACKET)) )
		{
			// End of message chunk
			msg->sendReliable( gAgent.getRegion()->getHost() );
		}
	}
// [RLVa:KB] - Checked: 2011-05-22 (RLVa-1.3.1)
	sInitialAttachmentsRequested = true;
// [/RLVa:KB]
}

// Debug-build sanity check: if wearables claim to be loaded, nothing should
// still be pending.
void LLAgentWearables::checkWearablesLoaded() const
{
#ifdef SHOW_ASSERT
	U32 item_pend_count = itemUpdatePendingCount();
	if (mWearablesLoaded)
	{
		llassert(item_pend_count==0);
	}
#endif
}

// Returns false if the given wearable is already topmost/bottommost
// (depending on closer_to_body parameter).
bool LLAgentWearables::canMoveWearable(const LLUUID& item_id, bool closer_to_body) const
{
	const LLWearable* wearable = getWearableFromItemID(item_id);
	if (!wearable) return false;

	LLWearableType::EType wtype = wearable->getType();
	const LLWearable* marginal_wearable = closer_to_body ? getBottomWearable(wtype) : getTopWearable(wtype);
	if (!marginal_wearable) return false;

	return wearable != marginal_wearable;
}

BOOL LLAgentWearables::areWearablesLoaded() const
{
	checkWearablesLoaded();
	return mWearablesLoaded;
}

// MULTI-WEARABLE: DEPRECATED: item pending count relies on old messages that don't support multi-wearables. do not trust to be accurate
void LLAgentWearables::updateWearablesLoaded()
{
	mWearablesLoaded = (itemUpdatePendingCount()==0);
	if (mWearablesLoaded)
	{
		notifyLoadingFinished();
	}
}

// A wearable may be removed unless it is the last worn body part of its type.
bool LLAgentWearables::canWearableBeRemoved(const LLViewerWearable* wearable) const
{
	if (!wearable) return false;

	LLWearableType::EType type = wearable->getType();
	// Make sure the user always has at least one shape, skin, eyes, and hair type currently worn.
	return !(((type == LLWearableType::WT_SHAPE) || (type == LLWearableType::WT_SKIN) || (type == LLWearableType::WT_HAIR) || (type == LLWearableType::WT_EYES))
			&& (getWearableCount(type) <= 1) );
}

// Advances the animatable visual params of every worn wearable by `delta`.
void LLAgentWearables::animateAllWearableParams(F32 delta, BOOL upload_bake)
{
	for( S32 type = 0; type < LLWearableType::WT_COUNT; ++type )
	{
		for (S32 count = 0; count < (S32)getWearableCount((LLWearableType::EType)type); ++count)
		{
			LLViewerWearable *wearable = getViewerWearable((LLWearableType::EType)type,count);
			llassert(wearable);
			if (wearable)
			{
				wearable->animateParams(delta, upload_bake);
			}
		}
	}
}

// Moves the wearable backing `item` one layer toward or away from the body.
// Returns true when a swap actually happened.
bool LLAgentWearables::moveWearable(const LLViewerInventoryItem* item, bool closer_to_body)
{
	if (!item) return false;
	if (!item->isWearableType()) return false;

	LLWearableType::EType type = item->getWearableType();
	U32 wearable_count = getWearableCount(type);
	if (0 == wearable_count) return false;

	const LLUUID& asset_id = item->getAssetUUID();

	//nowhere to move if the wearable is already on any boundary (closest to the body/furthest from the body)
	if (closer_to_body)
	{
		// NOTE(review): bottom_wearable is dereferenced without a null check;
		// presumably getBottomWearable() cannot return NULL when
		// wearable_count > 0 - confirm against LLWearableData.
		LLViewerWearable* bottom_wearable = dynamic_cast<LLViewerWearable*>( getBottomWearable(type) );
		if (bottom_wearable->getAssetID() == asset_id)
		{
			return false;
		}
	}
	else // !closer_to_body
	{
		LLViewerWearable* top_wearable = dynamic_cast<LLViewerWearable*>( getTopWearable(type) );
		if (top_wearable->getAssetID() == asset_id)
		{
			return false;
		}
	}

	for (U32 i = 0; i < wearable_count; ++i)
	{
		LLViewerWearable* wearable = getViewerWearable(type, i);
		if (!wearable) continue;
		if (wearable->getAssetID() != asset_id) continue;

		//swapping wearables
		U32 swap_i = closer_to_body ?
			i-1 : i+1; // swap with the neighbor toward (i-1) or away from (i+1) the body
		swapWearables(type, i, swap_i);
		return true;
	}

	return false;
}

// static
// Creates a brand-new wearable of `type`, files it in `parent_id` (or the
// type's default folder), and optionally wears+edits it once created.
void LLAgentWearables::createWearable(LLWearableType::EType type, bool wear, const LLUUID& parent_id)
{
	if (type == LLWearableType::WT_INVALID || type == LLWearableType::WT_NONE) return;

	LLViewerWearable* wearable = LLWearableList::instance().createNewWearable(type, gAgentAvatarp);
	LLAssetType::EType asset_type = wearable->getAssetType();
	LLInventoryType::EType inv_type = LLInventoryType::IT_WEARABLE;
	// When wear is requested, wear_and_edit_cb wears the item as soon as the
	// inventory item exists.
	LLPointer<LLInventoryCallback> cb = wear ? new LLBoostFuncInventoryCallback(wear_and_edit_cb) : NULL;
	LLUUID folder_id;

	if (parent_id.notNull())
	{
		folder_id = parent_id;
	}
	else
	{
		LLFolderType::EType folder_type = LLFolderType::assetTypeToFolderType(asset_type);
		folder_id = gInventory.findCategoryUUIDForType(folder_type);
	}

	create_inventory_item(gAgent.getID(),
			gAgent.getSessionID(),
			folder_id,
			wearable->getTransactionID(),
			wearable->getName(),
			wearable->getDescription(),
			asset_type, inv_type,
			wearable->getType(),
			wearable->getPermissions().getMaskNextOwner(),
			cb);
}

// static
// Opens the appearance side panel to edit the (worn, modifiable) wearable
// backing the given inventory item; logs and bails on any precondition failure.
void LLAgentWearables::editWearable(const LLUUID& item_id)
{
	LLViewerInventoryItem* item = gInventory.getLinkedItem(item_id);
	if (!item)
	{
		LL_WARNS() << "Failed to get linked item" << LL_ENDL;
		return;
	}

	LLViewerWearable* wearable = gAgentWearables.getWearableFromItemID(item_id);
	if (!wearable)
	{
		LL_WARNS() << "Cannot get wearable" << LL_ENDL;
		return;
	}

	if (!gAgentWearables.isWearableModifiable(item->getUUID()))
	{
		LL_WARNS() << "Cannot modify wearable" << LL_ENDL;
		return;
	}

	const BOOL disable_camera_switch = LLWearableType::getDisableCameraSwitch(wearable->getType());
	LLPanel* panel = LLFloaterSidePanelContainer::getPanel("appearance");
	LLSidepanelAppearance::editWearable(wearable, panel, disable_camera_switch);
}

// Request editing the item after it gets worn.
void LLAgentWearables::requestEditingWearable(const LLUUID& item_id) { mItemToEdit = gInventory.getLinkedItemID(item_id); } // Start editing the item if previously requested. void LLAgentWearables::editWearableIfRequested(const LLUUID& item_id) { if (mItemToEdit.notNull() && mItemToEdit == gInventory.getLinkedItemID(item_id)) { LLAgentWearables::editWearable(item_id); mItemToEdit.setNull(); } } void LLAgentWearables::updateServer() { sendAgentWearablesUpdate(); gAgent.sendAgentSetAppearance(); } void LLAgentWearables::populateMyOutfitsFolder(void) { LL_INFOS() << "starting outfit population" << LL_ENDL; const LLUUID& my_outfits_id = gInventory.findCategoryUUIDForType(LLFolderType::FT_MY_OUTFITS); LLLibraryOutfitsFetch* outfits = new LLLibraryOutfitsFetch(my_outfits_id); outfits->mMyOutfitsID = my_outfits_id; // Get the complete information on the items in the inventory and // setup an observer that will wait for that to happen. gInventory.addObserver(outfits); outfits->startFetch(); if (outfits->isFinished()) { outfits->done(); } } boost::signals2::connection LLAgentWearables::addLoadingStartedCallback(loading_started_callback_t cb) { return mLoadingStartedSignal.connect(cb); } boost::signals2::connection LLAgentWearables::addLoadedCallback(loaded_callback_t cb) { return mLoadedSignal.connect(cb); } bool LLAgentWearables::changeInProgress() const { return mCOFChangeInProgress; } // [SL:KB] - Patch: Appearance-InitialWearablesLoadedCallback | Checked: 2010-08-14 (Catznip-2.1) boost::signals2::connection LLAgentWearables::addInitialWearablesLoadedCallback(loaded_callback_t cb) { return mInitialWearablesLoadedSignal.connect(cb); } // [/SL:KB] void LLAgentWearables::notifyLoadingStarted() { mCOFChangeInProgress = true; mLoadingStartedSignal(); } void LLAgentWearables::notifyLoadingFinished() { mCOFChangeInProgress = false; mLoadedSignal(); } // EOF
// // This file is part of thttpd // Copyright (c) Antonino Calderone (antonino.calderone@gmail.com) // All rights reserved. // Licensed under the MIT License. // See COPYING file in the project root for full license information. // /* -------------------------------------------------------------------------- */ #include "OsSocketSupport.h" /* -------------------------------------------------------------------------- */ #ifdef _MSC_VER /* -------------------------------------------------------------------------- */ // MS Visual C++ /* -------------------------------------------------------------------------- */ #pragma comment(lib, "Ws2_32.lib") /* -------------------------------------------------------------------------- */ bool OsSocketSupport::initSocketLibrary(std::string& msg) { // Socket library initialization WORD wVersionRequested = WINSOCK_VERSION; WSADATA wsaData = { 0 }; bool ret = 0 == WSAStartup(wVersionRequested, &wsaData); if (!ret) msg = "WSAStartup failed"; return ret; } /* -------------------------------------------------------------------------- */ int OsSocketSupport::closeSocketFd(int sd) { return ::closesocket(sd); } /* -------------------------------------------------------------------------- */ #else /* -------------------------------------------------------------------------- */ // Other C++ platform /* -------------------------------------------------------------------------- */ bool OsSocketSupport::initSocketLibrary(std::string&) { return true; } /* -------------------------------------------------------------------------- */ int OsSocketSupport::closeSocketFd(int sd) { return ::close(sd); } /* -------------------------------------------------------------------------- */ #endif
//Copyright (C) 2022 Ehsan Kamrani //This file is licensed and distributed under MIT license // CustomBitmapButton.cpp : implementation file // #include "stdafx.h" #include "VandaEngine1.h" #include "CustomBitmapButton.h" // CCustomBitmapButton IMPLEMENT_DYNAMIC(CCustomBitmapButton, CButton) CCustomBitmapButton::CCustomBitmapButton() { } CCustomBitmapButton::~CCustomBitmapButton() { } BOOL CCustomBitmapButton::AutoLoad(UINT nID, CWnd* pParent) { // first attach the CBitmapButton to the dialog control if (!SubclassDlgItem(nID, pParent)) return FALSE; CString buttonName; GetWindowText(buttonName); ASSERT(!buttonName.IsEmpty()); // must provide a title LoadBitmaps(buttonName + _T("U"), buttonName + _T("D"), buttonName + _T("F"), buttonName + _T("X")); // we need at least the primary if (m_bitmap.m_hObject == NULL) return FALSE; // size to content //SizeToContent(); return TRUE; } // Draw the appropriate bitmap void CCustomBitmapButton::DrawItem(LPDRAWITEMSTRUCT lpDIS) { ASSERT(lpDIS != NULL); // must have at least the first bitmap loaded before calling DrawItem ASSERT(m_bitmap.m_hObject != NULL); // required // use the main bitmap for up, the selected bitmap for down CBitmap* pBitmap = &m_bitmap; UINT state = lpDIS->itemState; if ((state & ODS_SELECTED) && m_bitmapSel.m_hObject != NULL) pBitmap = &m_bitmapSel; else if ((state & ODS_FOCUS) && m_bitmapFocus.m_hObject != NULL) pBitmap = &m_bitmapFocus; // third image for focused else if ((state & ODS_DISABLED) && m_bitmapDisabled.m_hObject != NULL) pBitmap = &m_bitmapDisabled; // last image for disabled // draw the whole button CDC* pDC = CDC::FromHandle(lpDIS->hDC); CDC memDC; memDC.CreateCompatibleDC(pDC); CBitmap* pOld = memDC.SelectObject(pBitmap); if (pOld == NULL) return; // destructors will clean up CRect rect; rect.CopyRect(&lpDIS->rcItem); BITMAP bits; pBitmap->GetObject(sizeof(BITMAP),&bits); pDC->SetStretchBltMode(HALFTONE); pDC->StretchBlt(rect.left,rect.top,rect.Width(),rect.Height(), 
&memDC,0,0,bits.bmWidth, bits.bmHeight, SRCCOPY); memDC.SelectObject(pOld); } BEGIN_MESSAGE_MAP(CCustomBitmapButton, CButton) ON_WM_SETCURSOR() END_MESSAGE_MAP() // CCustomBitmapButton message handlers BOOL CCustomBitmapButton::OnSetCursor(CWnd* pWnd, UINT nHitTest, UINT message) { // TODO: Add your message handler code here and/or call default //::SetCursor( LoadCursor( AfxGetInstanceHandle(), MAKEINTRESOURCE( IDC_POINTER ) ) ); ::SetCursor( AfxGetApp()->LoadStandardCursor(IDC_HAND) ); return TRUE; return CBitmapButton::OnSetCursor(pWnd, nHitTest, message); }
/*============================================================================= Copyright (c) 2001-2011 Joel de Guzman Distributed under the Boost Software License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) ==============================================================================*/ #ifndef BOOST_PP_IS_ITERATING #if !defined(FUSION_VECTOR_TIE_07192005_1242) #define FUSION_VECTOR_TIE_07192005_1242 #include <boost/preprocessor/iterate.hpp> #include <boost/preprocessor/cat.hpp> #include <boost/preprocessor/repetition/enum_params.hpp> #include <boost/preprocessor/repetition/enum_binary_params.hpp> #include <boost/preprocessor/repetition/enum_params_with_a_default.hpp> #include <boost/preprocessor/repetition/repeat_from_to.hpp> #include <boost/fusion/container/vector/vector.hpp> #if !defined(BOOST_FUSION_DONT_USE_PREPROCESSED_FILES) #include <boost/fusion/container/generation/detail/preprocessed/vector_tie.hpp> #else #if defined(__WAVE__) && defined(BOOST_FUSION_CREATE_PREPROCESSED_FILES) #pragma wave option(preserve: 2, line: 0, output: "detail/preprocessed/vector_tie" FUSION_MAX_VECTOR_SIZE_STR".hpp") #endif /*============================================================================= Copyright (c) 2001-2011 Joel de Guzman Distributed under the Boost Software License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) This is an auto-generated file. Do not edit! 
==============================================================================*/ #if defined(__WAVE__) && defined(BOOST_FUSION_CREATE_PREPROCESSED_FILES) #pragma wave option(preserve: 1) #endif namespace boost { namespace fusion { struct void_; namespace result_of { template < BOOST_PP_ENUM_PARAMS_WITH_A_DEFAULT( FUSION_MAX_VECTOR_SIZE, typename T, void_) , typename Extra = void_ > struct vector_tie; } #define BOOST_FUSION_REF(z, n, data) BOOST_PP_CAT(T, n)& #define BOOST_PP_FILENAME_1 <boost/fusion/container/generation/vector_tie.hpp> #define BOOST_PP_ITERATION_LIMITS (1, FUSION_MAX_VECTOR_SIZE) #include BOOST_PP_ITERATE() #undef BOOST_FUSION_REF }} #if defined(__WAVE__) && defined(BOOST_FUSION_CREATE_PREPROCESSED_FILES) #pragma wave option(output: null) #endif #endif // BOOST_FUSION_DONT_USE_PREPROCESSED_FILES #endif #else // defined(BOOST_PP_IS_ITERATING) /////////////////////////////////////////////////////////////////////////////// // // Preprocessor vertical repetition code // /////////////////////////////////////////////////////////////////////////////// #define N BOOST_PP_ITERATION() namespace result_of { template <BOOST_PP_ENUM_PARAMS(N, typename T)> #define TEXT(z, n, text) , text struct vector_tie< BOOST_PP_ENUM_PARAMS(N, T) BOOST_PP_REPEAT_FROM_TO(BOOST_PP_DEC(N), FUSION_MAX_VECTOR_SIZE, TEXT, void_) > #undef TEXT { typedef vector<BOOST_PP_ENUM(N, BOOST_FUSION_REF, _)> type; }; } template <BOOST_PP_ENUM_PARAMS(N, typename T)> BOOST_FUSION_GPU_ENABLED inline vector<BOOST_PP_ENUM(N, BOOST_FUSION_REF, _)> vector_tie(BOOST_PP_ENUM_BINARY_PARAMS(N, T, & _)) { return vector<BOOST_PP_ENUM(N, BOOST_FUSION_REF, _)>( BOOST_PP_ENUM_PARAMS(N, _)); } #undef N #endif // defined(BOOST_PP_IS_ITERATING)
// Copyright (C) 2016-2020 Internet Systems Consortium, Inc. ("ISC") // // This Source Code Form is subject to the terms of the Mozilla Public // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at http://mozilla.org/MPL/2.0/. #include <config.h> #include <asiolink/asio_wrapper.h> #include <asiolink/io_service.h> #include <dhcpsrv/dhcpsrv_log.h> #include <dhcpsrv/timer_mgr.h> #include <exceptions/exceptions.h> #include <util/multi_threading_mgr.h> #include <boost/scoped_ptr.hpp> #include <functional> #include <utility> using namespace isc; using namespace isc::asiolink; using namespace isc::util; namespace { /// @brief Structure holding information for a single timer. /// /// This structure holds the instance of the watch socket being used to /// signal that the timer is "ready". It also holds the instance of the /// interval timer and other parameters pertaining to it. struct TimerInfo { /// @brief Instance of the interval timer. asiolink::IntervalTimer interval_timer_; /// @brief Holds the pointer to the callback supplied when registering /// the timer. asiolink::IntervalTimer::Callback user_callback_; /// @brief Interval timer interval supplied during registration. long interval_; /// @brief Interval timer scheduling mode supplied during registration. asiolink::IntervalTimer::Mode scheduling_mode_; /// @brief Constructor. /// /// @param io_service Reference to the IO service to be used by the /// interval timer created. /// @param user_callback Pointer to the callback function supplied /// during the timer registration. /// @param interval Timer interval in milliseconds. /// @param mode Interval timer scheduling mode. 
TimerInfo(asiolink::IOService& io_service, const asiolink::IntervalTimer::Callback& user_callback, const long interval, const asiolink::IntervalTimer::Mode& mode) : interval_timer_(io_service), user_callback_(user_callback), interval_(interval), scheduling_mode_(mode) { }; }; } namespace isc { namespace dhcp { /// @brief A type definition for the pointer to @c TimerInfo structure. typedef boost::shared_ptr<TimerInfo> TimerInfoPtr; /// @brief A type definition for the map holding timers configuration. typedef std::map<std::string, TimerInfoPtr> TimerInfoMap; /// @brief Implementation of the @c TimerMgr class TimerMgrImpl { public: /// @brief Constructor. TimerMgrImpl(); /// @brief Sets IO service to be used by the Timer Manager. /// /// @param io_service Pointer to the new IO service. void setIOService(const IOServicePtr& io_service); /// @brief Registers new timer in the @c TimerMgr. /// /// @param timer_name Unique name for the timer. /// @param callback Pointer to the callback function to be invoked /// when the timer elapses, e.g. function processing expired leases /// in the DHCP server. /// @param interval Timer interval in milliseconds. /// @param scheduling_mode Scheduling mode of the timer as described in /// @c asiolink::IntervalTimer::Mode. /// /// @throw BadValue if the timer name is invalid or duplicate. void registerTimer(const std::string& timer_name, const asiolink::IntervalTimer::Callback& callback, const long interval, const asiolink::IntervalTimer::Mode& scheduling_mode); /// @brief Unregisters specified timer. /// /// This method cancels the timer if it is setup and removes the timer /// from the internal collection of timers. /// /// @param timer_name Name of the timer to be unregistered. /// /// @throw BadValue if the specified timer hasn't been registered. void unregisterTimer(const std::string& timer_name); /// @brief Unregisters all timers. /// /// This method must be explicitly called prior to termination of the /// process. 
void unregisterTimers(); /// @brief Checks if the timer with a specified name has been registered. /// /// @param timer_name Name of the timer. /// @return true if the timer with the specified name has been registered, /// false otherwise. bool isTimerRegistered(const std::string& timer_name); /// @brief Returns the number of registered timers. size_t timersCount() const; /// @brief Schedules the execution of the interval timer. /// /// This method schedules the timer, i.e. the callback will be executed /// after specified interval elapses. The interval has been specified /// during timer registration. Depending on the mode selected during the /// timer registration, the callback will be executed once after it has /// been scheduled or until it is cancelled. Though, in the former case /// the timer can be re-scheduled in the callback function. /// /// @param timer_name Unique timer name. /// /// @throw BadValue if the timer hasn't been registered. void setup(const std::string& timer_name); /// @brief Cancels the execution of the interval timer. /// /// @param timer_name Unique timer name. /// /// @throw BadValue if the timer hasn't been registered. void cancel(const std::string& timer_name); private: /// @name Internal methods called while holding the mutex in multi threading /// mode. /// @brief Registers new timer in the @c TimerMgr. /// /// @param timer_name Unique name for the timer. /// @param callback Pointer to the callback function to be invoked /// when the timer elapses, e.g. function processing expired leases /// in the DHCP server. /// @param interval Timer interval in milliseconds. /// @param scheduling_mode Scheduling mode of the timer as described in /// @c asiolink::IntervalTimer::Mode. /// /// @throw BadValue if the timer name is invalid or duplicate. 
void registerTimerInternal(const std::string& timer_name, const asiolink::IntervalTimer::Callback& callback, const long interval, const asiolink::IntervalTimer::Mode& scheduling_mode); /// @brief Unregisters specified timer. /// /// This method cancels the timer if it is setup and removes the timer /// from the internal collection of timers. /// /// @param timer_name Name of the timer to be unregistered. /// /// @throw BadValue if the specified timer hasn't been registered. void unregisterTimerInternal(const std::string& timer_name); /// @brief Unregisters all timers. /// /// This method must be explicitly called prior to termination of the /// process. void unregisterTimersInternal(); /// @brief Schedules the execution of the interval timer. /// /// This method schedules the timer, i.e. the callback will be executed /// after specified interval elapses. The interval has been specified /// during timer registration. Depending on the mode selected during the /// timer registration, the callback will be executed once after it has /// been scheduled or until it is cancelled. Though, in the former case /// the timer can be re-scheduled in the callback function. /// /// @param timer_name Unique timer name. /// /// @throw BadValue if the timer hasn't been registered. void setupInternal(const std::string& timer_name); /// @brief Cancels the execution of the interval timer. /// /// @param timer_name Unique timer name. /// /// @throw BadValue if the timer hasn't been registered. void cancelInternal(const std::string& timer_name); /// @brief Callback function to be executed for each interval timer when /// its scheduled interval elapses. /// /// @param timer_name Unique timer name. void timerCallback(const std::string& timer_name); /// @brief Pointer to the io service. asiolink::IOServicePtr io_service_; /// @brief Holds mapping of the timer name to timer instance and other /// parameters pertaining to the timer. 
TimerInfoMap registered_timers_; /// @brief The mutex to protect the timer manager. boost::scoped_ptr<std::mutex> mutex_; }; TimerMgrImpl::TimerMgrImpl() : io_service_(new IOService()), registered_timers_(), mutex_(new std::mutex) { } void TimerMgrImpl::setIOService(const IOServicePtr& io_service) { if (!io_service) { isc_throw(BadValue, "IO service object must not be null for TimerMgr"); } io_service_ = io_service; } void TimerMgrImpl::registerTimer(const std::string& timer_name, const IntervalTimer::Callback& callback, const long interval, const IntervalTimer::Mode& scheduling_mode) { if (MultiThreadingMgr::instance().getMode()) { std::lock_guard<std::mutex> lock(*mutex_); registerTimerInternal(timer_name, callback, interval, scheduling_mode); } else { registerTimerInternal(timer_name, callback, interval, scheduling_mode); } } void TimerMgrImpl::registerTimerInternal(const std::string& timer_name, const IntervalTimer::Callback& callback, const long interval, const IntervalTimer::Mode& scheduling_mode) { // Timer name must not be empty. if (timer_name.empty()) { isc_throw(BadValue, "registered timer name must not be empty"); } // Must not register two timers under the same name. if (registered_timers_.find(timer_name) != registered_timers_.end()) { isc_throw(BadValue, "trying to register duplicate timer '" << timer_name << "'"); } // Create a structure holding the configuration for the timer. It will // create the instance if the IntervalTimer. It will also hold the // callback, interval and scheduling mode parameters. TimerInfoPtr timer_info(new TimerInfo(*io_service_, callback, interval, scheduling_mode)); // Actually register the timer. 
registered_timers_.insert(std::pair<std::string, TimerInfoPtr>(timer_name, timer_info)); } void TimerMgrImpl::unregisterTimer(const std::string& timer_name) { if (MultiThreadingMgr::instance().getMode()) { std::lock_guard<std::mutex> lock(*mutex_); unregisterTimerInternal(timer_name); } else { unregisterTimerInternal(timer_name); } } void TimerMgrImpl::unregisterTimerInternal(const std::string& timer_name) { // Find the timer with specified name. TimerInfoMap::iterator timer_info_it = registered_timers_.find(timer_name); // Check if the timer has been registered. if (timer_info_it == registered_timers_.end()) { isc_throw(BadValue, "unable to unregister non existing timer '" << timer_name << "'"); } // Cancel any pending asynchronous operation and stop the timer. cancelInternal(timer_name); // Remove the timer. registered_timers_.erase(timer_info_it); } void TimerMgrImpl::unregisterTimers() { if (MultiThreadingMgr::instance().getMode()) { std::lock_guard<std::mutex> lock(*mutex_); unregisterTimersInternal(); } else { unregisterTimersInternal(); } } void TimerMgrImpl::unregisterTimersInternal() { // Copy the map holding timers configuration. This is required so as // we don't cut the branch which we're sitting on when we will be // erasing the timers. We're going to iterate over the register timers // and remove them with the call to unregisterTimer function. But this // function will remove them from the register_timers_ map. If we // didn't work on the copy here, our iterator would invalidate. The // TimerInfo structure is copyable and since it is using the shared // pointers the copy is not expensive. Also this function is called when // the process terminates so it is not critical for performance. TimerInfoMap registered_timers_copy(registered_timers_); // Iterate over the existing timers and unregister them. 
for (TimerInfoMap::iterator timer_info_it = registered_timers_copy.begin(); timer_info_it != registered_timers_copy.end(); ++timer_info_it) { unregisterTimerInternal(timer_info_it->first); } } bool TimerMgrImpl::isTimerRegistered(const std::string& timer_name) { if (MultiThreadingMgr::instance().getMode()) { std::lock_guard<std::mutex> lock(*mutex_); return (registered_timers_.find(timer_name) != registered_timers_.end()); } else { return (registered_timers_.find(timer_name) != registered_timers_.end()); } } size_t TimerMgrImpl::timersCount() const { if (MultiThreadingMgr::instance().getMode()) { std::lock_guard<std::mutex> lock(*mutex_); return (registered_timers_.size()); } else { return (registered_timers_.size()); } } void TimerMgrImpl::setup(const std::string& timer_name) { if (MultiThreadingMgr::instance().getMode()) { std::lock_guard<std::mutex> lock(*mutex_); setupInternal(timer_name); } else { setupInternal(timer_name); } } void TimerMgrImpl::setupInternal(const std::string& timer_name) { // Check if the specified timer exists. TimerInfoMap::const_iterator timer_info_it = registered_timers_.find(timer_name); if (timer_info_it == registered_timers_.end()) { isc_throw(BadValue, "unable to setup timer '" << timer_name << "': " "no such timer registered"); } // Schedule the execution of the timer using the parameters supplied // during the registration. const TimerInfoPtr& timer_info = timer_info_it->second; IntervalTimer::Callback cb = std::bind(&TimerMgrImpl::timerCallback, this, timer_name); timer_info->interval_timer_.setup(cb, timer_info->interval_, timer_info->scheduling_mode_); } void TimerMgrImpl::cancel(const std::string& timer_name) { if (MultiThreadingMgr::instance().getMode()) { std::lock_guard<std::mutex> lock(*mutex_); cancelInternal(timer_name); } else { cancelInternal(timer_name); } } void TimerMgrImpl::cancelInternal(const std::string& timer_name) { // Find the timer of our interest. 
TimerInfoMap::const_iterator timer_info_it = registered_timers_.find(timer_name); if (timer_info_it == registered_timers_.end()) { isc_throw(BadValue, "unable to cancel timer '" << timer_name << "': " "no such timer registered"); } // Cancel the timer. timer_info_it->second->interval_timer_.cancel(); } void TimerMgrImpl::timerCallback(const std::string& timer_name) { // Find the specified timer setup. TimerInfoMap::iterator timer_info_it = registered_timers_.find(timer_name); if (timer_info_it != registered_timers_.end()) { // Running user-defined operation for the timer. Logging it // on the slightly lower debug level as there may be many // such traces. LOG_DEBUG(dhcpsrv_logger, DHCPSRV_DBG_TRACE_DETAIL, DHCPSRV_TIMERMGR_RUN_TIMER_OPERATION) .arg(timer_info_it->first); std::string error_string; try { timer_info_it->second->user_callback_(); } catch (const std::exception& ex){ error_string = ex.what(); } catch (...) { error_string = "unknown reason"; } // Exception was thrown. Log an error. 
if (!error_string.empty()) { LOG_ERROR(dhcpsrv_logger, DHCPSRV_TIMERMGR_CALLBACK_FAILED) .arg(timer_info_it->first) .arg(error_string); } } } const TimerMgrPtr& TimerMgr::instance() { static TimerMgrPtr timer_mgr(new TimerMgr()); return (timer_mgr); } TimerMgr::TimerMgr() : impl_(new TimerMgrImpl()) { } TimerMgr::~TimerMgr() { impl_->unregisterTimers(); } void TimerMgr::registerTimer(const std::string& timer_name, const IntervalTimer::Callback& callback, const long interval, const IntervalTimer::Mode& scheduling_mode) { LOG_DEBUG(dhcpsrv_logger, DHCPSRV_DBG_TRACE, DHCPSRV_TIMERMGR_REGISTER_TIMER) .arg(timer_name) .arg(interval); impl_->registerTimer(timer_name, callback, interval, scheduling_mode); } void TimerMgr::unregisterTimer(const std::string& timer_name) { LOG_DEBUG(dhcpsrv_logger, DHCPSRV_DBG_TRACE, DHCPSRV_TIMERMGR_UNREGISTER_TIMER) .arg(timer_name); impl_->unregisterTimer(timer_name); } void TimerMgr::unregisterTimers() { LOG_DEBUG(dhcpsrv_logger, DHCPSRV_DBG_TRACE, DHCPSRV_TIMERMGR_UNREGISTER_ALL_TIMERS); impl_->unregisterTimers(); } bool TimerMgr::isTimerRegistered(const std::string& timer_name) { return (impl_->isTimerRegistered(timer_name)); } size_t TimerMgr::timersCount() const { return (impl_->timersCount()); } void TimerMgr::setup(const std::string& timer_name) { LOG_DEBUG(dhcpsrv_logger, DHCPSRV_DBG_TRACE, DHCPSRV_TIMERMGR_START_TIMER) .arg(timer_name); impl_->setup(timer_name); } void TimerMgr::cancel(const std::string& timer_name) { LOG_DEBUG(dhcpsrv_logger, DHCPSRV_DBG_TRACE, DHCPSRV_TIMERMGR_STOP_TIMER) .arg(timer_name); impl_->cancel(timer_name); } void TimerMgr::setIOService(const IOServicePtr& io_service) { impl_->setIOService(io_service); } } // end of namespace isc::dhcp } // end of namespace isc
#include "pin.H"
#include <stdio.h>
#include <string.h>

#include "types.h"

#define INS_DELIMITER '\n'
#define ADDR_CHARS sizeof(ADDRINT)
#define RAW_TRACE_BUF_SIZE 512*Kb
#define TRACE_LIMIT 256*Mb
#define TRACE_NAME_LENGTH_LIMIT 128
#define THREADS_MAX_NO 256

static TLS_KEY tls_key = INVALID_TLS_KEY;
PIN_LOCK pin_lock;
static size_t spawned_threads_no;
// NOTE(review): shared across threads without synchronization; only reliable
// while instrumentation of the first trace happens single-threaded.
bool isFirstIns = true;
const char* prog_name;
trace_t* traces[THREADS_MAX_NO];

// Append buf_len raw bytes to the per-thread trace buffer.
// Callers must verify capacity against RAW_TRACE_BUF_SIZE first.
void recordInRawTrace(const char* buf, size_t buf_len, trace_t* trace)
{
    memcpy(trace->buf + trace->cursor, buf, buf_len);
    trace->cursor += buf_len;
}

// Dump every thread's raw trace buffer to f, in thread order, stopping at
// the first unused slot (traces[] is zero-initialized).
void printRawTrace(FILE* f)
{
    size_t trace_no = 0;
    trace_t* trace = traces[trace_no];
    while (trace != NULL) {
        for (size_t i = 0; i < trace->cursor; i++) {
            fputc(trace->buf[i], f);
        }
        trace = traces[++trace_no];
    }
}

// Analysis routine: record the disassembled instruction text for this thread.
void INS_Analysis(char* disassembled_ins, UINT32 disassembled_ins_len, THREADID thread_idx)
{
    trace_t* trace = (trace_t*)PIN_GetThreadData(tls_key, thread_idx);

    // Capacity guard. BUGFIX: the old guard compared against TRACE_LIMIT
    // (256 Mb) while the buffer only holds RAW_TRACE_BUF_SIZE (512 Kb),
    // allowing a heap overflow once the trace grew past the buffer.
    if (trace->cursor + disassembled_ins_len >= RAW_TRACE_BUF_SIZE)
        return;

    recordInRawTrace(disassembled_ins, disassembled_ins_len, trace);
}

// Analysis routine: record "@<hex target>" for taken branches/calls.
void INS_JumpAnalysis(ADDRINT target_branch, INT32 taken, THREADID thread_idx)
{
    if (!taken)
        return;

    trace_t* trace = (trace_t*)PIN_GetThreadData(tls_key, thread_idx);

    /* Worst case: '\n' + '@' + 2*sizeof(ADDRINT) hex digits + 0 terminator.
       Stack buffer — BUGFIX: the old heap buffer was never freed. */
    char buf[sizeof(ADDRINT) * 2 + 3];
    buf[0] = '\n';
    buf[1] = '@';
    // BUGFIX: "%x" truncated 64-bit addresses; print the full value.
    sprintf(buf + 2, "%llx", (unsigned long long)target_branch);

    // BUGFIX: record only the meaningful bytes; the old code recorded the
    // whole allocation, embedding NUL padding into the trace.
    size_t buf_len = 2 + strlen(buf + 2);

    // Capacity guard against the real buffer size (see INS_Analysis).
    if (trace->cursor + buf_len >= RAW_TRACE_BUF_SIZE)
        return;

    recordInRawTrace(buf, buf_len, trace);
}

// Instrumentation routine: instrument only instructions belonging to the
// target program's image, inserting the analysis callbacks above.
void Trace(TRACE trace, void* v)
{
    // Let's whitelist the instrumented program only
    RTN rtn = TRACE_Rtn(trace);
    if (RTN_Valid(rtn)) {
        SEC sec = RTN_Sec(rtn);
        if (SEC_Valid(sec)) {
            IMG img = SEC_Img(sec);
            if (IMG_Valid(img)) {
                if (!strstr(IMG_Name(img).c_str(), prog_name)) {
                    return; // not part of the instrumented program
                }
            } else
                return;
        } else
            return;
    } else
        return;

    for (BBL bbl = TRACE_BblHead(trace); BBL_Valid(bbl); bbl = BBL_Next(bbl)) {
        for (INS ins = BBL_InsHead(bbl); INS_Valid(ins); ins = INS_Next(ins)) {
            string disassembled_ins_s = INS_Disassemble(ins);

            /* Allocate enough space to save
               - INS_DELIMITER (1 byte)
               - Disassembled instruction (n bytes)
               - 0 terminator (1 byte)
               The buffer intentionally outlives this function: Pin keeps the
               pointer for the lifetime of the instrumented code. */
            uint32_t alloc_len = strlen(disassembled_ins_s.c_str()) + 2;
            char* disassembled_ins = (char*)calloc(1, sizeof(char) * alloc_len);
            disassembled_ins[0] = INS_DELIMITER;
            strcpy(disassembled_ins + 1, disassembled_ins_s.c_str());

            if (isFirstIns) {
                // Drop the leading delimiter for the very first instruction.
                // BUGFIX: strcpy on overlapping buffers is undefined
                // behavior; use memmove.
                isFirstIns = false;
                memmove(disassembled_ins, disassembled_ins + 1,
                        strlen(disassembled_ins + 1) + 1);
            }

            // BUGFIX: record strlen bytes, not the allocation size, so the
            // trailing NUL is not written into the trace.
            uint32_t record_len = (uint32_t)strlen(disassembled_ins);

            INS_InsertCall(ins, IPOINT_BEFORE, (AFUNPTR)INS_Analysis,
                           IARG_PTR, disassembled_ins,
                           IARG_UINT32, record_len,
                           IARG_THREAD_ID,
                           IARG_END);

            if (INS_IsBranchOrCall(ins)) {
                INS_InsertCall(ins, IPOINT_BEFORE, (AFUNPTR)INS_JumpAnalysis,
                               IARG_BRANCH_TARGET_ADDR,
                               IARG_BRANCH_TAKEN,
                               IARG_THREAD_ID,
                               IARG_END);
            }
        }
    }
}

// Per-thread start: allocate and register a raw trace buffer for the thread.
void ThreadStart(THREADID thread_idx, CONTEXT* ctx, INT32 flags, VOID* v)
{
    fprintf(stdout, "[*] Spawned thread %d\n", thread_idx);
    fflush(stdout);

    if (thread_idx >= THREADS_MAX_NO) {
        fprintf(stderr, "[x] Too many threads, ignoring thread %d\n", thread_idx);
        return;
    }

    /* Initialize a raw trace per thread */
    PIN_GetLock(&pin_lock, thread_idx);

    // BUGFIX: the old code allocated sizeof(trace_t*) — the size of a
    // pointer — instead of the structure itself.
    trace_t* trace = (trace_t*)malloc(sizeof(trace_t));
    trace->buf = (char*)malloc(sizeof(char) * RAW_TRACE_BUF_SIZE);
    trace->cursor = 0;
    traces[thread_idx] = trace;

    if (PIN_SetThreadData(tls_key, trace, thread_idx) == FALSE) {
        fprintf(stderr, "[x] PIN_SetThreadData failed");
        PIN_ExitProcess(1);
    }

    spawned_threads_no++;
    PIN_ReleaseLock(&pin_lock);
}

// Per-thread finish: flush the accumulated trace to trace_<tid>.out.
void ThreadFini(THREADID thread_idx, const CONTEXT* ctx, INT32 code, VOID* v)
{
    fprintf(stdout, "[*] Finished thread %d\n", thread_idx);
    fflush(stdout);

    char filename[TRACE_NAME_LENGTH_LIMIT] = { 0 };
    sprintf(filename, "trace_%d.out", thread_idx);

    FILE* out = fopen(filename, "w+");
    if (out == NULL) {
        // BUGFIX: the old code passed an unchecked NULL FILE* on.
        fprintf(stderr, "[x] Could not open %s\n", filename);
        return;
    }
    printRawTrace(out);
    fclose(out); // BUGFIX: the stream was never closed

    fprintf(stdout, "[+] Trace for thread #%d saved to %s\n", thread_idx, filename);
}

// Process-end summary.
void Fini(INT32 code, VOID *v)
{
    fprintf(stdout, "=======================\n");
    fprintf(stdout, "Trace finished\n");
    // BUGFIX: %zu for size_t (the old %d was wrong on LP64 targets)
    fprintf(stdout, "Threads spawned: %zu\n", spawned_threads_no);
    fprintf(stdout, "=======================\n");
}

int main(int argc, char *argv[])
{
    /* Init PIN */
    if (PIN_Init(argc, argv)) {
        fprintf(stderr, "[x] An error occurred while initiating PIN\n");
        return 1; // BUGFIX: signal failure (the old code returned 0)
    }

    /* Prepare TLS */
    tls_key = PIN_CreateThreadDataKey(NULL);
    if (tls_key == INVALID_TLS_KEY) {
        fprintf(stderr, "[x] Number of already allocated keys reached the MAX_CLIENT_TLS_KEYS limit\n");
        PIN_ExitProcess(1);
    }

    /* Prepare Lock */
    PIN_InitLock(&pin_lock);

    // NOTE(review): assumes the last argument is the target program name —
    // TODO confirm against how this tool is launched.
    prog_name = argv[argc - 1];

    TRACE_AddInstrumentFunction(Trace, 0);
    PIN_AddThreadStartFunction(ThreadStart, 0);
    PIN_AddThreadFiniFunction(ThreadFini, 0);
    PIN_AddFiniFunction(Fini, 0);

    PIN_StartProgram();
    return 0;
}
#include "java.h"
#include <string>
#ifdef WIN32
#else
#include <unistd.h>
#endif
#include "javaObject.h"
#include "javaScope.h"
#include "methodCallBaton.h"
#include "node_NodeDynamicProxyClass.h"
#include <node_version.h>
#include <sstream>
#include <nan.h>

// Error code reported when a dynamic-proxy JS callback itself threw.
#define DYNAMIC_PROXY_JS_ERROR -4

// Platform-neutral thread id: Win32 uses a numeric id, POSIX uses pthread_t.
#ifdef WIN32
typedef long threadId;
#else
typedef pthread_t threadId;
#endif

// Id of the thread that ran Java::Init (i.e. the v8 event-loop thread).
threadId v8ThreadId;

/*static*/ Nan::Persistent<v8::FunctionTemplate> Java::s_ct;
/*static*/ std::string Java::s_nativeBindingLocation;

// Sleep for `dur` units. NOTE(review): Win32 Sleep() takes milliseconds while
// usleep() takes microseconds, so the unit differs per platform — confirm
// callers account for that.
void my_sleep(int dur) {
#ifdef WIN32
  Sleep(dur);
#else
  usleep(dur);
#endif
}

// Returns the calling thread's id in the platform-neutral threadId type.
threadId my_getThreadId() {
#ifdef WIN32
  return (long)GetCurrentThreadId();
#else
  return pthread_self();
#endif
}

// Compares two thread ids (pthread_t values must go through pthread_equal).
bool v8ThreadIdEquals(threadId a, threadId b) {
#ifdef WIN32
  return a == b;
#else
  return pthread_equal(a, b);
#endif
}

// Registers the "Java" class template and all of its prototype methods on
// `target` (the module exports object).
/*static*/ void Java::Init(v8::Local<v8::Object> target) {
  Nan::HandleScope scope;

  // Remember which thread owns v8 so native callbacks can detect when they
  // are invoked from a different thread.
  v8ThreadId = my_getThreadId();

  v8::Local<v8::FunctionTemplate> t = Nan::New<v8::FunctionTemplate>(New);
  s_ct.Reset(t);
  t->InstanceTemplate()->SetInternalFieldCount(1);
  t->SetClassName(Nan::New<v8::String>("Java").ToLocalChecked());

  Nan::SetPrototypeMethod(t, "getClassLoader", getClassLoader);
  Nan::SetPrototypeMethod(t, "newInstance", newInstance);
  Nan::SetPrototypeMethod(t, "newInstanceSync", newInstanceSync);
  Nan::SetPrototypeMethod(t, "newProxy", newProxy);
  Nan::SetPrototypeMethod(t, "callStaticMethod", callStaticMethod);
  Nan::SetPrototypeMethod(t, "callStaticMethodSync", callStaticMethodSync);
  Nan::SetPrototypeMethod(t, "callMethod", callMethod);
  Nan::SetPrototypeMethod(t, "callMethodSync", callMethodSync);
  Nan::SetPrototypeMethod(t, "findClassSync", findClassSync);
  Nan::SetPrototypeMethod(t, "newArray", newArray);
  Nan::SetPrototypeMethod(t, "newByte", newByte);
  Nan::SetPrototypeMethod(t, "newShort", newShort);
  Nan::SetPrototypeMethod(t, "newLong", newLong);
  Nan::SetPrototypeMethod(t, "newChar", newChar);
  Nan::SetPrototypeMethod(t, "newFloat", newFloat);
  Nan::SetPrototypeMethod(t, "newDouble", newDouble);
  Nan::SetPrototypeMethod(t, "getStaticFieldValue", getStaticFieldValue);
  Nan::SetPrototypeMethod(t, "setStaticFieldValue", setStaticFieldValue);
  Nan::SetPrototypeMethod(t, "instanceOf", instanceOf);

  Nan::Set(target, Nan::New<v8::String>("Java").ToLocalChecked(), Nan::GetFunction(t).ToLocalChecked());

  JavaProxyObject::init();
}

// JS constructor: wraps a fresh Java instance and seeds the user-configurable
// properties (classpath, options, nativeBindingLocation, asyncOptions).
NAN_METHOD(Java::New) {
  Nan::HandleScope scope;

  Java *self = new Java();
  self->Wrap(info.This());

  Nan::Set(self->handle(), Nan::New<v8::String>("classpath").ToLocalChecked(), Nan::New<v8::Array>());
  Nan::Set(self->handle(), Nan::New<v8::String>("options").ToLocalChecked(), Nan::New<v8::Array>());
  Nan::Set(self->handle(), Nan::New<v8::String>("nativeBindingLocation").ToLocalChecked(), Nan::New<v8::String>("Not Set").ToLocalChecked());
  Nan::Set(self->handle(), Nan::New<v8::String>("asyncOptions").ToLocalChecked(), Nan::Null());

  info.GetReturnValue().Set(info.This());
}

// Defaults: JVM not yet created; "Sync"-suffixed methods are synchronous,
// bare names are async, promises are disabled.
Java::Java() {
  this->m_jvm = NULL;
  this->m_env = NULL;
  m_SyncSuffix = "Sync";
  m_AsyncSuffix = "";
  doSync = true;
  doAsync = true;
  doPromise = false;
}

Java::~Java() {
  this->destroyJVM(&this->m_jvm, &this->m_env);
}

// Lazily creates the JVM on first use. Returns Null on success, otherwise the
// error value produced by createJVM.
v8::Local<v8::Value> Java::ensureJvm() {
  if(!m_jvm) {
    v8::Local<v8::Value> result = createJVM(&this->m_jvm, &this->m_env);
    assert(result->IsNull());
    return result;
  }

  return Nan::Null();
}

// Reads the user-supplied asyncOptions object and configures which method-name
// suffixes map to the sync / async / promise call variants.
void Java::configureAsync(v8::Local<v8::Value>& asyncOptions) {
  v8::Local<v8::Object> asyncOptionsObj = asyncOptions.As<v8::Object>();

  // Disable every variant until the options prove otherwise.
  m_SyncSuffix = "invalid";
  m_AsyncSuffix = "invalid";
  m_PromiseSuffix = "invalid";
  doSync = false;
  doAsync = false;
  doPromise = false;

  v8::MaybeLocal<v8::Value> maybeSuffixValue = Nan::Get(asyncOptionsObj, Nan::New<v8::String>("syncSuffix").ToLocalChecked());
  v8::Local<v8::Value> suffixValue;
  if (maybeSuffixValue.ToLocal(&suffixValue) && suffixValue->IsString()) {
    v8::Local<v8::String> suffix = suffixValue->ToString(Nan::GetCurrentContext()).ToLocalChecked();
    Nan::Utf8String utf8(suffix);
    m_SyncSuffix.assign(*utf8);
    doSync = true;
  }

  maybeSuffixValue = Nan::Get(asyncOptionsObj, Nan::New<v8::String>("asyncSuffix").ToLocalChecked());
  if (maybeSuffixValue.ToLocal(&suffixValue) && suffixValue->IsString()) {
    v8::Local<v8::String> suffix = suffixValue->ToString(Nan::GetCurrentContext()).ToLocalChecked();
    Nan::Utf8String utf8(suffix);
    m_AsyncSuffix.assign(*utf8);
    doAsync = true;
  }

  maybeSuffixValue = Nan::Get(asyncOptionsObj, Nan::New<v8::String>("promiseSuffix").ToLocalChecked());
  if (maybeSuffixValue.ToLocal(&suffixValue) && suffixValue->IsString()) {
    v8::Local<v8::String> suffix = suffixValue->ToString(Nan::GetCurrentContext()).ToLocalChecked();
    Nan::Utf8String utf8(suffix);
    m_PromiseSuffix.assign(*utf8);
    // A promisify function must accompany a promise suffix; a non-function
    // value aborts via the assert below.
    v8::MaybeLocal<v8::Value> maybePromisify = Nan::Get(asyncOptionsObj, Nan::New<v8::String>("promisify").ToLocalChecked());
    v8::Local<v8::Value> promisify;
    if (maybePromisify.ToLocal(&promisify) && !promisify->IsFunction()) {
      fprintf(stderr, "asyncOptions.promisify must be a function");
      assert(promisify->IsFunction());
    }
    doPromise = true;
  }

  // Enabled variants must use distinct suffixes or generated method names
  // would collide.
  if (doSync && doAsync) {
    assert(m_SyncSuffix != m_AsyncSuffix);
  }
  if (doSync && doPromise) {
    assert(m_SyncSuffix != m_PromiseSuffix);
  }
  if (doAsync && doPromise) {
    assert(m_AsyncSuffix != m_PromiseSuffix);
  }

  m_asyncOptions.Reset(asyncOptionsObj);
}

// Creates the in-process JVM: builds -Djava.class.path from the `classpath`
// array, copies the `options` array verbatim, then calls JNI_CreateJavaVM.
// Returns Null on success or a v8 error value on bad configuration.
v8::Local<v8::Value> Java::createJVM(JavaVM** jvm, JNIEnv** env) {
  v8::MaybeLocal<v8::Value> maybeAsyncOptions = Nan::Get(this->handle(), Nan::New<v8::String>("asyncOptions").ToLocalChecked());
  v8::Local<v8::Value> asyncOptions;
  if (maybeAsyncOptions.ToLocal(&asyncOptions) && asyncOptions->IsObject()) {
    configureAsync(asyncOptions);
  }

  // setup classpath
  std::ostringstream classPath;
  classPath << "-Djava.class.path=";

  v8::MaybeLocal<v8::Value> maybeClassPathValue = Nan::Get(this->handle(), Nan::New<v8::String>("classpath").ToLocalChecked());
  v8::Local<v8::Value> classPathValue;
  if(!maybeClassPathValue.ToLocal(&classPathValue) || !classPathValue->IsArray()) {
    return Nan::TypeError("Classpath must be an array");
  }
  v8::Local<v8::Array> classPathArrayTemp = v8::Local<v8::Array>::Cast(classPathValue);
  m_classPathArray.Reset(classPathArrayTemp);
  for(uint32_t i=0; i<classPathArrayTemp->Length(); i++) {
    if(i != 0) {
      // Classpath entry separator is platform-specific.
#ifdef WIN32
      classPath << ";";
#else
      classPath << ":";
#endif
    }
    v8::Local<v8::Value> arrayItemValue = classPathArrayTemp->Get(Nan::GetCurrentContext(), i).ToLocalChecked();
    if(!arrayItemValue->IsString()) {
      return Nan::TypeError("Classpath must only contain strings");
    }
    v8::Local<v8::String> arrayItem = arrayItemValue->ToString(Nan::GetCurrentContext()).ToLocalChecked();
    Nan::Utf8String arrayItemStr(arrayItem);
    classPath << *arrayItemStr;
  }

  // set the native binding location
  v8::Local<v8::Value> v8NativeBindingLocation = Nan::Get(this->handle(), Nan::New<v8::String>("nativeBindingLocation").ToLocalChecked()).FromMaybe(v8::Local<v8::Value>());
  Nan::Utf8String nativeBindingLocationStr(v8NativeBindingLocation);
  s_nativeBindingLocation = *nativeBindingLocationStr;

  // get other options
  v8::Local<v8::Value> optionsValue = Nan::Get(this->handle(), Nan::New<v8::String>("options").ToLocalChecked()).FromMaybe(v8::Local<v8::Value>());
  if(!optionsValue->IsArray()) {
    return Nan::TypeError("options must be an array");
  }
  v8::Local<v8::Array> optionsArrayTemp = v8::Local<v8::Array>::Cast(optionsValue);
  m_optionsArray.Reset(optionsArrayTemp);

  // create vm options: slot 0 is the classpath, the rest mirror the user
  // options array.
  int vmOptionsCount = optionsArrayTemp->Length() + 1;
  JavaVMOption* vmOptions = new JavaVMOption[vmOptionsCount];
  //printf("classPath: %s\n", classPath.str().c_str());
  vmOptions[0].optionString = strdup(classPath.str().c_str());
  for(uint32_t i=0; i<optionsArrayTemp->Length(); i++) {
    v8::Local<v8::Value> arrayItemValue = optionsArrayTemp->Get(Nan::GetCurrentContext(), i).ToLocalChecked();
    if(!arrayItemValue->IsString()) {
      // NOTE(review): the strdup'd optionString entries already filled in are
      // not freed on this path — confirm whether the leak matters here.
      delete[] vmOptions;
      return Nan::TypeError("options must only contain strings");
    }
    v8::Local<v8::String> arrayItem = arrayItemValue->ToString(Nan::GetCurrentContext()).ToLocalChecked();
    Nan::Utf8String arrayItemStr(arrayItem);
    vmOptions[i+1].optionString = strdup(*arrayItemStr);
  }

  JavaVMInitArgs args;
  // The JNI invocation is documented to include a function JNI_GetDefaultJavaVMInitArgs that
  // was formerly called here. But the documentation from Oracle is confusing/contradictory.
  // 1) It claims that the caller must set args.version before calling JNI_GetDefaultJavaVMInitArgs, which
  // we did not do.
  // 2) The sample code provide at the top of the doc doesn't even call JNI_GetDefaultJavaVMInitArgs.
  // 3) The Oracle documentation for Java 6 through Java 8 all contain a comment "Note that in the JDK/JRE, there is no
  // longer any need to call JNI_GetDefaultJavaVMInitArgs."
  // 4) It seems that some platforms don't implement JNI_GetDefaultJavaVMInitArgs, or have
  // marked it deprecated.
  // Omitting the call to JNI_GetDefaultJavaVMInitArgs works fine on Mac and Linux with Java 7 and Java 8.
  // The Oracle documentation is here:
  // http://docs.oracle.com/javase/6/docs/technotes/guides/jni/spec/invocation.html
  // http://docs.oracle.com/javase/7/docs/technotes/guides/jni/spec/invocation.html
  // http://docs.oracle.com/javase/8/docs/technotes/guides/jni/spec/invocation.html
  args.version = JNI_BEST_VERSION;
  // JNI_GetDefaultJavaVMInitArgs(&args); // If this turns out to be necessary, it should be called here.
  args.ignoreUnrecognized = false;
  args.options = vmOptions;
  args.nOptions = vmOptionsCount;

  JavaVM* jvmTemp;
  // NOTE(review): the return code of JNI_CreateJavaVM is not checked — a
  // failed launch leaves *env/*jvm in an undefined state. Confirm upstream.
  JNI_CreateJavaVM(&jvmTemp, (void **)env, &args);
  *jvm = jvmTemp;
  delete [] vmOptions;

  m_classLoader = getSystemClassLoader(*env);

  v8::Local<v8::Value> onJvmCreated = Nan::Get(this->handle(), Nan::New<v8::String>("onJvmCreated").ToLocalChecked()).FromMaybe(v8::Local<v8::Value>());

  // TODO: the accessors installed below guard re-assignment of these
  // properties but do not prevent mutating the underlying data, so
  // java.classpath.push(...) still works even though it is invalid after
  // JVM creation.
  // Freeze the configuration properties now that the JVM exists; writes are
  // rejected by the setter below.
  Nan::SetAccessor(this->handle(), Nan::New<v8::String>("classpath").ToLocalChecked(), AccessorProhibitsOverwritingGetter, AccessorProhibitsOverwritingSetter);
  Nan::SetAccessor(this->handle(), Nan::New<v8::String>("options").ToLocalChecked(), AccessorProhibitsOverwritingGetter, AccessorProhibitsOverwritingSetter);
  Nan::SetAccessor(this->handle(), Nan::New<v8::String>("nativeBindingLocation").ToLocalChecked(), AccessorProhibitsOverwritingGetter, AccessorProhibitsOverwritingSetter);
  Nan::SetAccessor(this->handle(), Nan::New<v8::String>("asyncOptions").ToLocalChecked(), AccessorProhibitsOverwritingGetter, AccessorProhibitsOverwritingSetter);
  Nan::SetAccessor(this->handle(), Nan::New<v8::String>("onJvmCreated").ToLocalChecked(), AccessorProhibitsOverwritingGetter, AccessorProhibitsOverwritingSetter);

  // Invoke the user's onJvmCreated hook, if one was provided.
  if (onJvmCreated->IsFunction()) {
    v8::Local<v8::Function> onJvmCreatedFunc = onJvmCreated.As<v8::Function>();
    v8::Local<v8::Object> context = Nan::New<v8::Object>();
    Nan::Call(onJvmCreatedFunc, context, 0, NULL);
  }

  return Nan::Null();
}

// Getter installed on the frozen config properties: returns the stored value
// for known property names, or an Error value for anything else.
NAN_GETTER(Java::AccessorProhibitsOverwritingGetter) {
  Java* self = Nan::ObjectWrap::Unwrap<Java>(info.This());
  Nan::HandleScope scope;
  Nan::Utf8String nameStr(property);
  if(!strcmp("classpath", *nameStr)) {
    info.GetReturnValue().Set(Nan::New(self->m_classPathArray));
    return;
  } else if(!strcmp("options", *nameStr)) {
    info.GetReturnValue().Set(Nan::New(self->m_optionsArray));
    return;
  } else if(!strcmp("nativeBindingLocation", *nameStr)) {
    info.GetReturnValue().Set(Nan::New(Java::s_nativeBindingLocation.c_str()).ToLocalChecked());
    return;
  } else if(!strcmp("asyncOptions", *nameStr)) {
    info.GetReturnValue().Set(Nan::New(self->m_asyncOptions));
    return;
  } else if(!strcmp("onJvmCreated", *nameStr)) {
    // There is no good reason to get onJvmCreated, so just fall through to error below.
  }

  std::ostringstream errStr;
  errStr << "Invalid call to accessor " << *nameStr;
  info.GetReturnValue().Set(Nan::Error(errStr.str().c_str()));
}

// Setter installed on the frozen config properties: always throws.
NAN_SETTER(Java::AccessorProhibitsOverwritingSetter) {
  Nan::Utf8String nameStr(property);
  std::ostringstream errStr;
  errStr << "Cannot set " << *nameStr << " after calling any other java function.";
  Nan::ThrowError(errStr.str().c_str());
}

// Tears down the JVM and clears both out-pointers.
void Java::destroyJVM(JavaVM** jvm, JNIEnv** env) {
  (*jvm)->DestroyJavaVM();
  *jvm = NULL;
  *env = NULL;
}

// js: java.getClassLoader() — returns the JVM's system class loader wrapped as
// a JS object.
NAN_METHOD(Java::getClassLoader) {
  Nan::HandleScope scope;
  Java* self = Nan::ObjectWrap::Unwrap<Java>(info.This());
  v8::Local<v8::Value> ensureJvmResults = self->ensureJvm();
  if(!ensureJvmResults->IsNull()) {
    info.GetReturnValue().Set(ensureJvmResults);
    return;
  }
  JNIEnv* env = self->getJavaEnv();
  JavaScope javaScope(env);

  jclass classClazz = env->FindClass("java/lang/ClassLoader");
  jmethodID class_getClassLoader = env->GetStaticMethodID(classClazz, "getSystemClassLoader", "()Ljava/lang/ClassLoader;");
  jobject classLoader = env->CallStaticObjectMethod(classClazz, class_getClassLoader);
  checkJavaException(env);
  // A global ref keeps the loader alive beyond this JNI local frame.
  jobject result = env->NewGlobalRef(classLoader);
  info.GetReturnValue().Set(javaToV8(self, env, result));
}

// js: java.newInstance(className, ...args, callback) — asynchronously
// constructs a Java object and delivers the result to the callback.
NAN_METHOD(Java::newInstance) {
  Nan::HandleScope scope;
  Java* self = Nan::ObjectWrap::Unwrap<Java>(info.This());
  v8::Local<v8::Value> ensureJvmResults = self->ensureJvm();
  if(!ensureJvmResults->IsNull()) {
    info.GetReturnValue().Set(ensureJvmResults);
    return;
  }
  JNIEnv* env = self->getJavaEnv();
  JavaScope javaScope(env);

  int argsStart = 0;
  int argsEnd = info.Length();

  // arguments
  ARGS_FRONT_CLASSNAME();
  ARGS_BACK_CALLBACK();

  // find class
  jclass clazz = javaFindClass(env, className);
  if(clazz == NULL) {
    EXCEPTION_CALL_CALLBACK(self, "Could not find class " << className.c_str());
    info.GetReturnValue().SetUndefined();
    return;
  }

  // get method
  jobjectArray methodArgs = v8ToJava(env, info, argsStart, argsEnd);
  jobject method = javaFindConstructor(env, clazz, methodArgs);
  if(method == NULL) {
    std::string msg = methodNotFoundToString(env, clazz, className, true, info, argsStart, argsEnd);
    EXCEPTION_CALL_CALLBACK(self, msg);
    info.GetReturnValue().SetUndefined();
    return;
  }

  // run (the baton manages its own lifetime across the async call)
  NewInstanceBaton* baton = new NewInstanceBaton(self, clazz, method, methodArgs, callback);
  baton->run();

  END_CALLBACK_FUNCTION("\"Constructor for class '" << className << "' called without a callback did you mean to use the Sync version?\"");
}

// js: java.newInstanceSync(className, ...args) — synchronous variant; throws
// on failure, returns the wrapped instance.
NAN_METHOD(Java::newInstanceSync) {
  Nan::HandleScope scope;
  Java* self = Nan::ObjectWrap::Unwrap<Java>(info.This());
  v8::Local<v8::Value> ensureJvmResults = self->ensureJvm();
  if(!ensureJvmResults->IsNull()) {
    info.GetReturnValue().Set(ensureJvmResults);
    return;
  }
  JNIEnv* env = self->getJavaEnv();
  JavaScope javaScope(env);

  int argsStart = 0;
  int argsEnd = info.Length();

  // arguments
  ARGS_FRONT_CLASSNAME();

  // find class
  jclass clazz = javaFindClass(env, className);
  if(clazz == NULL) {
    std::ostringstream errStr;
    errStr << "Could not create class " << className.c_str();
    return Nan::ThrowError(javaExceptionToV8(self, env, errStr.str()));
  }

  // find method
  jobjectArray methodArgs = v8ToJava(env, info, argsStart, argsEnd);
  jobject method = javaFindConstructor(env, clazz, methodArgs);
  if(method == NULL) {
    std::string msg = methodNotFoundToString(env, clazz, className, true, info, argsStart, argsEnd);
    return Nan::ThrowError(javaExceptionToV8(self, env, msg));
  }

  // run synchronously with a null callback
  v8::Local<v8::Value> callback = Nan::Null();
  NewInstanceBaton* baton = new NewInstanceBaton(self, clazz, method, methodArgs, callback);
  v8::Local<v8::Value> result = baton->runSync();
  delete baton;
  if(result->IsNativeError()) {
    return Nan::ThrowError(result);
  }
  info.GetReturnValue().Set(result);
}

// js: java.newProxy(interfaceName, functions) — builds a
// java.lang.reflect.Proxy whose invocation handler dispatches into the
// supplied JS functions.
NAN_METHOD(Java::newProxy) {
  Nan::HandleScope scope;
  Java* self = Nan::ObjectWrap::Unwrap<Java>(info.This());
  v8::Local<v8::Value> ensureJvmResults = self->ensureJvm();
  if(!ensureJvmResults->IsNull()) {
    info.GetReturnValue().Set(ensureJvmResults);
    return;
  }
  JNIEnv* env =
self->getJavaEnv();
  JavaScope javaScope(env);

  int argsStart = 0;

  // arguments
  ARGS_FRONT_STRING(interfaceName);
  ARGS_FRONT_OBJECT(functions);

  // Bookkeeping shared with the Java-side NodeDynamicProxyClass; the start/end
  // markers let native callbacks validate the pointer they get back from Java.
  DynamicProxyData* dynamicProxyData = new DynamicProxyData();
  dynamicProxyData->markerStart = DYNAMIC_PROXY_DATA_MARKER_START;
  dynamicProxyData->markerEnd = DYNAMIC_PROXY_DATA_MARKER_END;
  dynamicProxyData->java = self;
  dynamicProxyData->interfaceName = interfaceName;
  dynamicProxyData->functions.Reset(functions);

  // find NodeDynamicProxyClass
  std::string className = "node.NodeDynamicProxyClass";
  jclass clazz = javaFindClass(env, className);
  if(clazz == NULL) {
    std::ostringstream errStr;
    errStr << "Could not create class node/NodeDynamicProxyClass";
    delete dynamicProxyData;
    return Nan::ThrowError(javaExceptionToV8(self, env, errStr.str()));
  }

  // find constructor; args are (nativeBindingLocation, dataPtr)
  jclass objectClazz = env->FindClass("java/lang/Object");
  jobjectArray methodArgs = env->NewObjectArray(2, objectClazz, NULL);
  env->SetObjectArrayElement(methodArgs, 0, v8ToJava(env, Nan::New<v8::String>(s_nativeBindingLocation.c_str()).ToLocalChecked()));
  // The native DynamicProxyData pointer travels to Java as a long.
  env->SetObjectArrayElement(methodArgs, 1, longToJavaLongObj(env, (jlong)dynamicProxyData));
  jobject method = javaFindConstructor(env, clazz, methodArgs);
  if(method == NULL) {
    std::ostringstream errStr;
    errStr << "Could not find constructor for class node/NodeDynamicProxyClass";
    // NOTE(review): unlike the branch above, dynamicProxyData is not freed on
    // this or any later error path in this function — looks like a leak;
    // confirm before changing.
    return Nan::ThrowError(javaExceptionToV8(self, env, errStr.str()));
  }

  // create the NodeDynamicProxyClass
  jclass constructorClazz = env->FindClass("java/lang/reflect/Constructor");
  jmethodID constructor_newInstance = env->GetMethodID(constructorClazz, "newInstance", "([Ljava/lang/Object;)Ljava/lang/Object;");

  //printf("invoke: %s\n", javaMethodCallToString(env, m_method, constructor_newInstance, m_args).c_str());

  // run constructor
  jobject dynamicProxy = env->CallObjectMethod(method, constructor_newInstance, methodArgs);
  if(env->ExceptionCheck()) {
    std::ostringstream errStr;
    errStr << "Error creating class";
    return Nan::ThrowError(javaExceptionToV8(self, env, errStr.str()));
  }

  jclass dynamicInterface = javaFindClass(env, interfaceName);
  if(dynamicInterface == NULL) {
    std::ostringstream errStr;
    errStr << "Could not find interface ";
    errStr << interfaceName;
    return Nan::ThrowError(javaExceptionToV8(self, env, errStr.str()));
  }
  jclass classClazz = env->FindClass("java/lang/Class");
  jobjectArray classArray = env->NewObjectArray(1, classClazz, NULL);
  if(classArray == NULL) {
    std::ostringstream errStr;
    errStr << "Could not create class array for Proxy";
    return Nan::ThrowError(javaExceptionToV8(self, env, errStr.str()));
  }
  env->SetObjectArrayElement(classArray, 0, dynamicInterface);

  // Prefer the interface's own class loader; fall back to the loader of the
  // freshly constructed proxy handler object.
  jmethodID class_getClassLoader = env->GetMethodID(classClazz, "getClassLoader", "()Ljava/lang/ClassLoader;");
  jobject classLoader = env->CallObjectMethod(dynamicInterface, class_getClassLoader);
  assertNoException(env);
  if(classLoader == NULL) {
    jclass objectClazz = env->FindClass("java/lang/Object");
    jmethodID object_getClass = env->GetMethodID(objectClazz, "getClass", "()Ljava/lang/Class;");
    jobject jobjClass = env->CallObjectMethod(dynamicProxy, object_getClass);
    checkJavaException(env);
    classLoader = env->CallObjectMethod(jobjClass, class_getClassLoader);
    checkJavaException(env);
  }
  if(classLoader == NULL) {
    std::ostringstream errStr;
    errStr << "Could not get classloader for Proxy";
    return Nan::ThrowError(javaExceptionToV8(self, env, errStr.str()));
  }

  // create proxy instance
  jclass proxyClass = env->FindClass("java/lang/reflect/Proxy");
  jmethodID proxy_newProxyInstance = env->GetStaticMethodID(proxyClass, "newProxyInstance", "(Ljava/lang/ClassLoader;[Ljava/lang/Class;Ljava/lang/reflect/InvocationHandler;)Ljava/lang/Object;");
  jobject proxyInstance = env->CallStaticObjectMethod(proxyClass, proxy_newProxyInstance, classLoader, classArray, dynamicProxy);
  if(env->ExceptionCheck()) {
    std::ostringstream errStr;
    errStr << "Error creating java.lang.reflect.Proxy";
    return Nan::ThrowError(javaExceptionToV8(self, env, errStr.str()));
  }

  v8::Local<v8::Value>
result = javaToV8(self, env, proxyInstance, dynamicProxyData);
  // Remember the JS wrapper so later proxy invocations can find it again.
  dynamicProxyData->jsObject.Reset(result);
  info.GetReturnValue().Set(result);
}

// js: java.callStaticMethod(className, methodName, ...args, callback) —
// async static method invocation; errors and results go to the callback.
NAN_METHOD(Java::callStaticMethod) {
  Nan::HandleScope scope;
  Java* self = Nan::ObjectWrap::Unwrap<Java>(info.This());
  v8::Local<v8::Value> ensureJvmResults = self->ensureJvm();
  if(!ensureJvmResults->IsNull()) {
    info.GetReturnValue().Set(ensureJvmResults);
    return;
  }
  JNIEnv* env = self->getJavaEnv();
  JavaScope javaScope(env);

  int argsStart = 0;
  int argsEnd = info.Length();

  // arguments
  ARGS_FRONT_CLASSNAME();
  ARGS_FRONT_STRING(methodName);
  ARGS_BACK_CALLBACK();

  // find class
  jclass clazz = javaFindClass(env, className);
  if(clazz == NULL) {
    EXCEPTION_CALL_CALLBACK(self, "Could not create class " << className.c_str());
    info.GetReturnValue().SetUndefined();
    return;
  }

  // find method
  jobjectArray methodArgs = v8ToJava(env, info, argsStart, argsEnd);
  jobject method = javaFindMethod(env, clazz, methodName, methodArgs);
  if(method == NULL) {
    std::string msg = methodNotFoundToString(env, clazz, methodName, false, info, argsStart, argsEnd);
    EXCEPTION_CALL_CALLBACK(self, msg);
    info.GetReturnValue().SetUndefined();
    return;
  }

  // run
  StaticMethodCallBaton* baton = new StaticMethodCallBaton(self, clazz, method, methodArgs, callback);
  baton->run();

  END_CALLBACK_FUNCTION("\"Static method '" << methodName << "' called without a callback did you mean to use the Sync version?\"");
}

// js: java.callStaticMethodSync(className, methodName, ...args) — synchronous
// static method invocation; throws on failure.
NAN_METHOD(Java::callStaticMethodSync) {
  Nan::HandleScope scope;
  Java* self = Nan::ObjectWrap::Unwrap<Java>(info.This());
  v8::Local<v8::Value> ensureJvmResults = self->ensureJvm();
  if(!ensureJvmResults->IsNull()) {
    info.GetReturnValue().Set(ensureJvmResults);
    return;
  }
  JNIEnv* env = self->getJavaEnv();
  JavaScope javaScope(env);

  int argsStart = 0;
  int argsEnd = info.Length();

  // arguments
  ARGS_FRONT_CLASSNAME();
  ARGS_FRONT_STRING(methodName);

  // find class
  jclass clazz = javaFindClass(env, className);
  if(clazz == NULL) {
    std::ostringstream errStr;
    errStr << "Could not create class " << className.c_str();
    return Nan::ThrowError(javaExceptionToV8(self, env, errStr.str()));
  }

  // find method
  jobjectArray methodArgs = v8ToJava(env, info, argsStart, argsEnd);
  jobject method = javaFindMethod(env, clazz, methodName, methodArgs);
  if(method == NULL) {
    std::string msg = methodNotFoundToString(env, clazz, methodName, false, info, argsStart, argsEnd);
    return Nan::ThrowError(javaExceptionToV8(self, env, msg));
  }

  // run synchronously with a null callback
  v8::Local<v8::Value> callback = Nan::Null();
  StaticMethodCallBaton* baton = new StaticMethodCallBaton(self, clazz, method, methodArgs, callback);
  v8::Local<v8::Value> result = baton->runSync();
  delete baton;
  if(result->IsNativeError()) {
    Nan::ThrowError(result);
    return;
  }
  info.GetReturnValue().Set(result);
}

// js: instance.method...Sync(obj, methodName, ...args) — synchronous instance
// method invocation on a wrapped JavaObject; throws on failure.
NAN_METHOD(Java::callMethodSync) {
  Nan::HandleScope scope;
  Java* self = Nan::ObjectWrap::Unwrap<Java>(info.This());
  v8::Local<v8::Value> ensureJvmResults = self->ensureJvm();
  if(!ensureJvmResults->IsNull()) {
    info.GetReturnValue().Set(ensureJvmResults);
    return;
  }
  JNIEnv* env = self->getJavaEnv();
  JavaScope javaScope(env);

  int argsStart = 0;
  int argsEnd = info.Length();

  // arguments
  ARGS_FRONT_OBJECT(instanceObj);
  ARGS_FRONT_STRING(methodName);

  JavaObject* javaObj = Nan::ObjectWrap::Unwrap<JavaObject>(instanceObj);

  // find method on the instance's runtime class
  jclass clazz = javaObj->getClass();
  jobjectArray methodArgs = v8ToJava(env, info, argsStart, argsEnd);
  jobject method = javaFindMethod(env, clazz, methodName, methodArgs);
  if(method == NULL) {
    std::string msg = methodNotFoundToString(env, clazz, methodName, false, info, argsStart, argsEnd);
    return Nan::ThrowError(javaExceptionToV8(self, env, msg));
  }

  // run synchronously with a null callback
  v8::Local<v8::Value> callback = Nan::Null();
  InstanceMethodCallBaton* baton = new InstanceMethodCallBaton(self, javaObj, method, methodArgs, callback);
  v8::Local<v8::Value> result = baton->runSync();
  delete baton;
  if(result->IsNativeError()) {
    return Nan::ThrowError(result);
  }
  info.GetReturnValue().Set(result);
}

// js: async instance method invocation; result goes to the trailing callback.
NAN_METHOD(Java::callMethod) {
  Nan::HandleScope scope;
  Java* self = Nan::ObjectWrap::Unwrap<Java>(info.This());
  v8::Local<v8::Value> ensureJvmResults = self->ensureJvm();
  if(!ensureJvmResults->IsNull()) {
    info.GetReturnValue().Set(ensureJvmResults);
    return;
  }
  JNIEnv* env = self->getJavaEnv();
  JavaScope javaScope(env);

  int argsStart = 0;
  int argsEnd = info.Length();

  // arguments
  ARGS_FRONT_OBJECT(instanceObj);
  ARGS_FRONT_STRING(methodName);
  ARGS_BACK_CALLBACK();

  JavaObject* javaObj = Nan::ObjectWrap::Unwrap<JavaObject>(instanceObj);

  // find method on the instance's runtime class
  jclass clazz = javaObj->getClass();
  jobjectArray methodArgs = v8ToJava(env, info, argsStart, argsEnd);
  jobject method = javaFindMethod(env, clazz, methodName, methodArgs);
  if(method == NULL) {
    std::string msg = methodNotFoundToString(env, clazz, methodName, false, info, argsStart, argsEnd);
    EXCEPTION_CALL_CALLBACK(self, msg);
    info.GetReturnValue().SetUndefined();
    return;
  }

  // run
  InstanceMethodCallBaton* baton = new InstanceMethodCallBaton(self, javaObj, method, methodArgs, callback);
  baton->run();

  END_CALLBACK_FUNCTION("\"method '" << methodName << "' called without a callback did you mean to use the Sync version?\"");
}

// js: java.findClassSync(className) — resolves a class and returns it wrapped
// as a JS object; throws if the class cannot be found.
NAN_METHOD(Java::findClassSync) {
  Nan::HandleScope scope;
  Java* self = Nan::ObjectWrap::Unwrap<Java>(info.This());
  v8::Local<v8::Value> ensureJvmResults = self->ensureJvm();
  if(!ensureJvmResults->IsNull()) {
    info.GetReturnValue().Set(ensureJvmResults);
    return;
  }
  JNIEnv* env = self->getJavaEnv();
  JavaScope javaScope(env);

  int argsStart = 0;

  // arguments
  ARGS_FRONT_CLASSNAME();

  // find class
  jclass clazz = javaFindClass(env, className);
  if(clazz == NULL) {
    std::ostringstream errStr;
    errStr << "Could not create class " << className.c_str();
    return Nan::ThrowError(javaExceptionToV8(self, env, errStr.str()));
  }

  // run
  v8::Local<v8::Value> result = javaToV8(self, env, clazz);
  info.GetReturnValue().Set(result);
}

// js: java.newArray(className, jsArray) — builds a Java array of the given
// element type (primitive name or class name) from a JS array.
NAN_METHOD(Java::newArray) {
  Nan::HandleScope scope;
  Java* self = Nan::ObjectWrap::Unwrap<Java>(info.This());
  v8::Local<v8::Value>
ensureJvmResults = self->ensureJvm();
  if(!ensureJvmResults->IsNull()) {
    info.GetReturnValue().Set(ensureJvmResults);
    return;
  }
  JNIEnv* env = self->getJavaEnv();
  JavaScope javaScope(env);

  int argsStart = 0;

  // arguments
  ARGS_FRONT_CLASSNAME();

  // argument - array
  if(info.Length() < argsStart+1 || !info[argsStart]->IsArray()) {
    std::ostringstream errStr;
    errStr << "Argument " << (argsStart+1) << " must be an array";
    return Nan::ThrowError(Nan::TypeError(errStr.str().c_str()));
  }
  v8::Local<v8::Array> arrayObj = v8::Local<v8::Array>::Cast(info[argsStart]);

  // One branch per primitive element type: each JS item is first converted to
  // its boxed Java object, then unboxed via the wrapper's xxxValue() method
  // and copied into the primitive array one element at a time.
  jarray results;
  if(strcmp(className.c_str(), "byte") == 0) {
    results = env->NewByteArray(arrayObj->Length());
    for(uint32_t i=0; i<arrayObj->Length(); i++) {
      v8::Local<v8::Value> item = arrayObj->Get(Nan::GetCurrentContext(), i).ToLocalChecked();
      jobject val = v8ToJava(env, item);
      jclass byteClazz = env->FindClass("java/lang/Byte");
      jmethodID byte_byteValue = env->GetMethodID(byteClazz, "byteValue", "()B");
      jbyte byteValues[1];
      byteValues[0] = env->CallByteMethod(val, byte_byteValue);
      assertNoException(env);
      env->SetByteArrayRegion((jbyteArray)results, i, 1, byteValues);
    }
  } else if(strcmp(className.c_str(), "char") == 0) {
    results = env->NewCharArray(arrayObj->Length());
    for(uint32_t i=0; i<arrayObj->Length(); i++) {
      v8::Local<v8::Value> item = arrayObj->Get(Nan::GetCurrentContext(), i).ToLocalChecked();
      jobject val = v8ToJava(env, item);
      // chars arrive as JS strings; take the first character via String.charAt(0)
      jclass stringClazz = env->FindClass("java/lang/String");
      jmethodID string_charAt = env->GetMethodID(stringClazz, "charAt", "(I)C");
      jchar itemValues[1];
      itemValues[0] = env->CallCharMethod(val, string_charAt, 0);
      checkJavaException(env);
      env->SetCharArrayRegion((jcharArray)results, i, 1, itemValues);
    }
  } else if(strcmp(className.c_str(), "short") == 0) {
    results = env->NewShortArray(arrayObj->Length());
    for(uint32_t i=0; i<arrayObj->Length(); i++) {
      v8::Local<v8::Value> item = arrayObj->Get(Nan::GetCurrentContext(), i).ToLocalChecked();
      jobject val = v8ToJava(env, item);
      jclass shortClazz = env->FindClass("java/lang/Short");
      jmethodID short_shortValue = env->GetMethodID(shortClazz, "shortValue", "()S");
      jshort shortValues[1];
      shortValues[0] = env->CallShortMethod(val, short_shortValue);
      assertNoException(env);
      env->SetShortArrayRegion((jshortArray)results, i, 1, shortValues);
    }
  } else if(strcmp(className.c_str(), "double") == 0) {
    results = env->NewDoubleArray(arrayObj->Length());
    for(uint32_t i=0; i<arrayObj->Length(); i++) {
      v8::Local<v8::Value> item = arrayObj->Get(Nan::GetCurrentContext(), i).ToLocalChecked();
      jobject val = v8ToJava(env, item);
      jclass doubleClazz = env->FindClass("java/lang/Double");
      jmethodID double_doubleValue = env->GetMethodID(doubleClazz, "doubleValue", "()D");
      jdouble doubleValues[1];
      doubleValues[0] = env->CallDoubleMethod(val, double_doubleValue);
      assertNoException(env);
      env->SetDoubleArrayRegion((jdoubleArray)results, i, 1, doubleValues);
    }
  } else if(strcmp(className.c_str(), "int") == 0) {
    results = env->NewIntArray(arrayObj->Length());
    for(uint32_t i=0; i<arrayObj->Length(); i++) {
      v8::Local<v8::Value> item = arrayObj->Get(Nan::GetCurrentContext(), i).ToLocalChecked();
      jobject val = v8ToJava(env, item);
      jclass integerClazz = env->FindClass("java/lang/Integer");
      jmethodID integer_intValue = env->GetMethodID(integerClazz, "intValue", "()I");
      jint intValues[1];
      intValues[0] = env->CallIntMethod(val, integer_intValue);
      assertNoException(env);
      env->SetIntArrayRegion((jintArray)results, i, 1, intValues);
    }
  } else if(strcmp(className.c_str(), "float") == 0) {
    results = env->NewFloatArray(arrayObj->Length());
    for(uint32_t i=0; i<arrayObj->Length(); i++) {
      v8::Local<v8::Value> item = arrayObj->Get(Nan::GetCurrentContext(), i).ToLocalChecked();
      jobject val = v8ToJava(env, item);
      jclass floatClazz = env->FindClass("java/lang/Float");
      jmethodID float_floatValue = env->GetMethodID(floatClazz, "floatValue", "()F");
      jfloat floatValues[1];
      floatValues[0] = env->CallFloatMethod(val, float_floatValue);
      checkJavaException(env);
      env->SetFloatArrayRegion((jfloatArray)results, i, 1, floatValues);
    }
  } else if(strcmp(className.c_str(), "boolean") == 0) {
    results = env->NewBooleanArray(arrayObj->Length());
    for(uint32_t i=0; i<arrayObj->Length(); i++) {
      v8::Local<v8::Value> item = arrayObj->Get(Nan::GetCurrentContext(), i).ToLocalChecked();
      jobject val = v8ToJava(env, item);
      jclass booleanClazz = env->FindClass("java/lang/Boolean");
      jmethodID boolean_booleanValue = env->GetMethodID(booleanClazz, "booleanValue", "()Z");
      jboolean booleanValues[1];
      booleanValues[0] = env->CallBooleanMethod(val, boolean_booleanValue);
      checkJavaException(env);
      env->SetBooleanArrayRegion((jbooleanArray)results, i, 1, booleanValues);
    }
  } else {
    // Non-primitive element type: resolve the class and fill an Object array.
    jclass clazz = javaFindClass(env, className);
    if(clazz == NULL) {
      std::ostringstream errStr;
      errStr << "Could not create class " << className.c_str();
      return Nan::ThrowError(javaExceptionToV8(self, env, errStr.str()));
    }

    // create array
    results = env->NewObjectArray(arrayObj->Length(), clazz, NULL);
    for(uint32_t i=0; i<arrayObj->Length(); i++) {
      v8::Local<v8::Value> item = arrayObj->Get(Nan::GetCurrentContext(), i).ToLocalChecked();
      jobject val = v8ToJava(env, item);
      env->SetObjectArrayElement((jobjectArray)results, i, val);
      if(env->ExceptionOccurred()) {
        std::ostringstream errStr;
        Nan::Utf8String valStr(item);
        errStr << "Could not add item \"" << *valStr << "\" to array.";
        return Nan::ThrowError(javaExceptionToV8(self, env, errStr.str()));
      }
    }
  }

  info.GetReturnValue().Set(JavaObject::New(self, results));
}

// js: java.newByte(number) — boxes a JS number as a java.lang.Byte so the
// method-matching code can target the primitive byte overloads.
NAN_METHOD(Java::newByte) {
  Nan::HandleScope scope;
  Java* self = Nan::ObjectWrap::Unwrap<Java>(info.This());
  v8::Local<v8::Value> ensureJvmResults = self->ensureJvm();
  if(!ensureJvmResults->IsNull()) {
    info.GetReturnValue().Set(ensureJvmResults);
    return;
  }
  JNIEnv* env = self->getJavaEnv();
  JavaScope javaScope(env);

  if(info.Length() != 1) {
    return Nan::ThrowError(Nan::TypeError("newByte only takes 1 argument"));
  }

  // argument - value
if(!info[0]->IsNumber()) {
    return Nan::ThrowError(Nan::TypeError("Argument 1 must be a number"));
  }

  jbyte val = Nan::To<int32_t>(info[0]).FromJust();
  jclass clazz = env->FindClass("java/lang/Byte");
  jmethodID constructor = env->GetMethodID(clazz, "<init>", "(B)V");
  jobject newObj = env->NewObject(clazz, constructor, val);

  info.GetReturnValue().Set(JavaObject::New(self, newObj));
  return;
}

// js: java.newShort(number) — boxes a JS number as a java.lang.Short.
NAN_METHOD(Java::newShort) {
  Nan::HandleScope scope;
  Java* self = Nan::ObjectWrap::Unwrap<Java>(info.This());
  v8::Local<v8::Value> ensureJvmResults = self->ensureJvm();
  if(!ensureJvmResults->IsNull()) {
    info.GetReturnValue().Set(ensureJvmResults);
    return;
  }
  JNIEnv* env = self->getJavaEnv();
  JavaScope javaScope(env);

  if(info.Length() != 1) {
    return Nan::ThrowError(Nan::TypeError("newShort only takes 1 argument"));
  }

  // argument - value
  if(!info[0]->IsNumber()) {
    return Nan::ThrowError(Nan::TypeError("Argument 1 must be a number"));
  }

  jshort val = Nan::To<int32_t>(info[0]).FromJust();
  jclass clazz = env->FindClass("java/lang/Short");
  jmethodID constructor = env->GetMethodID(clazz, "<init>", "(S)V");
  jobject newObj = env->NewObject(clazz, constructor, val);

  info.GetReturnValue().Set(JavaObject::New(self, newObj));
}

// js: java.newLong(number) — boxes a JS number as a java.lang.Long.
NAN_METHOD(Java::newLong) {
  Nan::HandleScope scope;
  Java* self = Nan::ObjectWrap::Unwrap<Java>(info.This());
  v8::Local<v8::Value> ensureJvmResults = self->ensureJvm();
  if(!ensureJvmResults->IsNull()) {
    info.GetReturnValue().Set(ensureJvmResults);
    return;
  }
  JNIEnv* env = self->getJavaEnv();
  JavaScope javaScope(env);

  if(info.Length() != 1) {
    return Nan::ThrowError(Nan::TypeError("newLong only takes 1 argument"));
  }

  // argument - value
  if(!info[0]->IsNumber()) {
    return Nan::ThrowError(Nan::TypeError("Argument 1 must be a number"));
  }

  jlong val = Nan::To<int64_t>(info[0]).FromJust();
  jclass clazz = env->FindClass("java/lang/Long");
  jmethodID constructor = env->GetMethodID(clazz, "<init>", "(J)V");
  jobject newObj = env->NewObject(clazz, constructor, val);

  info.GetReturnValue().Set(JavaObject::New(self, newObj));
}

// js: java.newChar(numberOrString) — boxes a char code, or the single
// character of a 1-character string, as a java.lang.Character.
NAN_METHOD(Java::newChar) {
  Nan::HandleScope scope;
  Java* self = Nan::ObjectWrap::Unwrap<Java>(info.This());
  v8::Local<v8::Value> ensureJvmResults = self->ensureJvm();
  if(!ensureJvmResults->IsNull()) {
    info.GetReturnValue().Set(ensureJvmResults);
    return;
  }
  JNIEnv* env = self->getJavaEnv();
  JavaScope javaScope(env);

  if(info.Length() != 1) {
    return Nan::ThrowError(Nan::TypeError("newChar only takes 1 argument"));
  }

  // argument - value
  jchar charVal;
  if(info[0]->IsNumber()) {
    charVal = (jchar)Nan::To<int32_t>(info[0]).FromJust();
  } else if(info[0]->IsString()) {
    v8::Local<v8::String> val = info[0]->ToString(Nan::GetCurrentContext()).ToLocalChecked();
    if(val->Length() != 1) {
      return Nan::ThrowError(Nan::TypeError("Argument 1 must be a string of 1 character."));
    }
    std::string strVal = std::string(*Nan::Utf8String(val));
    charVal = (jchar)strVal[0];
  } else {
    return Nan::ThrowError(Nan::TypeError("Argument 1 must be a number or string"));
  }

  jclass clazz = env->FindClass("java/lang/Character");
  jmethodID constructor = env->GetMethodID(clazz, "<init>", "(C)V");
  jobject newObj = env->NewObject(clazz, constructor, charVal);

  info.GetReturnValue().Set(JavaObject::New(self, newObj));
}

// js: java.newFloat(number) — boxes a JS number as a java.lang.Float.
NAN_METHOD(Java::newFloat) {
  Nan::HandleScope scope;
  Java* self = Nan::ObjectWrap::Unwrap<Java>(info.This());
  v8::Local<v8::Value> ensureJvmResults = self->ensureJvm();
  if(!ensureJvmResults->IsNull()) {
    info.GetReturnValue().Set(ensureJvmResults);
    return;
  }
  JNIEnv* env = self->getJavaEnv();
  JavaScope javaScope(env);

  if(info.Length() != 1) {
    return Nan::ThrowError(Nan::TypeError("newFloat only takes 1 argument"));
  } else if(!info[0]->IsNumber()) {
    return Nan::ThrowError(Nan::TypeError("Argument 1 must be a number"));
  }

  jfloat val = (jfloat)Nan::To<double>(info[0]).FromJust();
  jclass clazz = env->FindClass("java/lang/Float");
  jmethodID constructor = env->GetMethodID(clazz, "<init>", "(F)V");
  jobject newObj = env->NewObject(clazz, constructor, val);

  info.GetReturnValue().Set(JavaObject::New(self, newObj));
}

// js: java.newDouble(number) — boxes a JS number as a java.lang.Double.
NAN_METHOD(Java::newDouble) {
  Nan::HandleScope scope;
  Java* self = Nan::ObjectWrap::Unwrap<Java>(info.This());
  v8::Local<v8::Value> ensureJvmResults = self->ensureJvm();
  if(!ensureJvmResults->IsNull()) {
    info.GetReturnValue().Set(ensureJvmResults);
    return;
  }
  JNIEnv* env = self->getJavaEnv();
  JavaScope javaScope(env);

  if(info.Length() != 1) {
    return Nan::ThrowError(Nan::TypeError("newDouble only takes 1 argument"));
  } else if(!info[0]->IsNumber()) {
    return Nan::ThrowError(Nan::TypeError("Argument 1 must be a number"));
  }

  jdouble val = (jdouble)Nan::To<double>(info[0]).FromJust();
  jclass clazz = env->FindClass("java/lang/Double");
  jmethodID constructor = env->GetMethodID(clazz, "<init>", "(D)V");
  jobject newObj = env->NewObject(clazz, constructor, val);

  info.GetReturnValue().Set(JavaObject::New(self, newObj));
}

// js: java.getStaticFieldValue(className, fieldName) — reads a static field
// via java.lang.reflect.Field. (Function continues beyond this chunk.)
NAN_METHOD(Java::getStaticFieldValue) {
  Nan::HandleScope scope;
  Java* self = Nan::ObjectWrap::Unwrap<Java>(info.This());
  v8::Local<v8::Value> ensureJvmResults = self->ensureJvm();
  if(!ensureJvmResults->IsNull()) {
    info.GetReturnValue().Set(ensureJvmResults);
    return;
  }
  JNIEnv* env = self->getJavaEnv();
  JavaScope javaScope(env);

  int argsStart = 0;

  // arguments
  ARGS_FRONT_CLASSNAME();
  ARGS_FRONT_STRING(fieldName);

  // find the class
  jclass clazz = javaFindClass(env, className);
  if(clazz == NULL) {
    std::ostringstream errStr;
    errStr << "Could not create class " << className.c_str();
    return Nan::ThrowError(javaExceptionToV8(self, env, errStr.str()));
  }

  // get the field
  jobject field = javaFindField(env, clazz, fieldName);
  if(field == NULL) {
    std::ostringstream errStr;
    errStr << "Could not find field \"" << fieldName.c_str() << "\" on class \"" << className.c_str() << "\"";
    return Nan::ThrowError(javaExceptionToV8(self, env, errStr.str()));
  }

  jclass fieldClazz = env->FindClass("java/lang/reflect/Field");
  jmethodID field_get = env->GetMethodID(fieldClazz, "get",
"(Ljava/lang/Object;)Ljava/lang/Object;"); // get field value jobject val = env->CallObjectMethod(field, field_get, NULL); if(env->ExceptionOccurred()) { std::ostringstream errStr; errStr << "Could not get field " << fieldName.c_str() << " on class " << className.c_str(); return Nan::ThrowError(javaExceptionToV8(self, env, errStr.str())); } info.GetReturnValue().Set(javaToV8(self, env, val)); } NAN_METHOD(Java::setStaticFieldValue) { Nan::HandleScope scope; Java* self = Nan::ObjectWrap::Unwrap<Java>(info.This()); v8::Local<v8::Value> ensureJvmResults = self->ensureJvm(); if(!ensureJvmResults->IsNull()) { info.GetReturnValue().Set(ensureJvmResults); return; } JNIEnv* env = self->getJavaEnv(); JavaScope javaScope(env); int argsStart = 0; // arguments ARGS_FRONT_CLASSNAME(); ARGS_FRONT_STRING(fieldName); // argument - new value if(info.Length() < argsStart+1) { std::ostringstream errStr; errStr << "setStaticFieldValue requires " << (argsStart+1) << " arguments"; Nan::ThrowError(Nan::TypeError(errStr.str().c_str())); return; } jobject newValue = v8ToJava(env, info[argsStart]); argsStart++; // find the class jclass clazz = javaFindClass(env, className); if(clazz == NULL) { std::ostringstream errStr; errStr << "Could not create class " << className.c_str(); Nan::ThrowError(javaExceptionToV8(self, env, errStr.str())); return; } // get the field jobject field = javaFindField(env, clazz, fieldName); if(field == NULL) { std::ostringstream errStr; errStr << "Could not find field \"" << fieldName.c_str() << "\" on class \"" << className.c_str() << "\""; Nan::ThrowError(javaExceptionToV8(self, env, errStr.str())); return; } jclass fieldClazz = env->FindClass("java/lang/reflect/Field"); jmethodID field_set = env->GetMethodID(fieldClazz, "set", "(Ljava/lang/Object;Ljava/lang/Object;)V"); //printf("newValue: %s\n", javaObjectToString(env, newValue).c_str()); // set field value env->CallObjectMethod(field, field_set, NULL, newValue); if(env->ExceptionOccurred()) { 
std::ostringstream errStr; errStr << "Could not set field " << fieldName.c_str() << " on class " << className.c_str(); Nan::ThrowError(javaExceptionToV8(self, env, errStr.str())); return; } info.GetReturnValue().SetUndefined(); } NAN_METHOD(Java::instanceOf) { Nan::HandleScope scope; Java* self = Nan::ObjectWrap::Unwrap<Java>(info.This()); v8::Local<v8::Value> ensureJvmResults = self->ensureJvm(); if(!ensureJvmResults->IsNull()) { info.GetReturnValue().Set(ensureJvmResults); return; } JNIEnv* env = self->getJavaEnv(); JavaScope javaScope(env); int argsStart = 0; ARGS_FRONT_OBJECT(obj); ARGS_FRONT_STRING(className); jobject instance = v8ToJava(env, obj); if (!instance) { // not even a Java object info.GetReturnValue().Set(Nan::New<v8::Boolean>(false)); return; } jclass clazz = javaFindClass(env, className); if(!clazz) { std::ostringstream errStr; errStr << "Could not find class " << className.c_str(); Nan::ThrowError(javaExceptionToV8(self, env, errStr.str())); return; } jboolean res = env->IsInstanceOf(instance, clazz); info.GetReturnValue().Set(Nan::New<v8::Boolean>(res)); } void EIO_CallJs(uv_work_t* req) { } template <typename T> std::string to_string(T value) { std::ostringstream os; os << value; return os.str(); } #if NODE_MINOR_VERSION >= 10 void EIO_AfterCallJs(uv_work_t* req, int status) { #else void EIO_AfterCallJs(uv_work_t* req) { #endif DynamicProxyData* dynamicProxyData = static_cast<DynamicProxyData*>(req->data); if(!dynamicProxyDataVerify(dynamicProxyData)) { return; } dynamicProxyData->result = NULL; JNIEnv* env; int ret = dynamicProxyData->java->getJvm()->GetEnv((void**)&env, JNI_BEST_VERSION); if (ret != JNI_OK) { dynamicProxyData->throwableClass = "java/lang/IllegalStateException"; dynamicProxyData->throwableMessage = "Could not retrieve JNIEnv: jvm->GetEnv returned " + to_string<int>(ret); dynamicProxyData->done = DYNAMIC_PROXY_JS_ERROR; return; } Nan::HandleScope scope; v8::Array* v8Args; v8::Local<v8::Function> fn; v8::Local<v8::Value>* argv; 
int argc; int i; v8::Local<v8::Value> v8Result; jobject javaResult; v8::Local<v8::Object> dynamicProxyDataFunctions = Nan::New(dynamicProxyData->functions); v8::Local<v8::Value> fnObj = dynamicProxyDataFunctions->Get(Nan::GetCurrentContext(), Nan::New<v8::String>(dynamicProxyData->methodName.c_str()).ToLocalChecked()).ToLocalChecked(); if(fnObj->IsUndefined() || fnObj->IsNull()) { dynamicProxyData->throwableClass = "java/lang/NoSuchMethodError"; dynamicProxyData->throwableMessage = "Could not find js function " + dynamicProxyData->methodName; dynamicProxyData->done = DYNAMIC_PROXY_JS_ERROR; return; } if(!fnObj->IsFunction()) { dynamicProxyData->throwableClass = "java/lang/IllegalStateException"; dynamicProxyData->throwableMessage = dynamicProxyData->methodName + " is not a function"; dynamicProxyData->done = DYNAMIC_PROXY_JS_ERROR; return; } fn = fnObj.As<v8::Function>(); if(dynamicProxyData->args) { v8Args = v8::Array::Cast(*javaArrayToV8(dynamicProxyData->java, env, dynamicProxyData->args)); argc = v8Args->Length(); } else { argc = 0; } argv = new v8::Local<v8::Value>[argc]; for(i=0; i<argc; i++) { argv[i] = v8Args->Get(Nan::GetCurrentContext(), i).ToLocalChecked(); } Nan::TryCatch tryCatch; tryCatch.SetCaptureMessage(true); v8Result = Nan::Call(fn, dynamicProxyDataFunctions, argc, argv).FromMaybe(v8::Local<v8::Value>()); delete[] argv; if (tryCatch.HasCaught()) { dynamicProxyData->throwableClass = "node/NodeJsException"; Nan::Utf8String message(tryCatch.Message()->Get()); dynamicProxyData->throwableMessage = std::string(*message); tryCatch.Reset(); dynamicProxyData->done = DYNAMIC_PROXY_JS_ERROR; return; } if(!dynamicProxyDataVerify(dynamicProxyData)) { return; } javaResult = v8ToJava(env, v8Result); if(javaResult == NULL) { dynamicProxyData->result = NULL; } else { dynamicProxyData->result = env->NewGlobalRef(javaResult); } dynamicProxyData->done = true; } void throwNewThrowable(JNIEnv* env, const char * excClassName, std::string msg) { jclass newExcCls = 
env->FindClass(excClassName); jthrowable throwable = env->ExceptionOccurred(); if (throwable != NULL) { env->Throw(throwable); // this should only be Errors, according to the docs } env->ThrowNew(newExcCls, msg.c_str()); } JNIEXPORT jobject JNICALL Java_node_NodeDynamicProxyClass_callJs(JNIEnv *env, jobject src, jlong ptr, jobject method, jobjectArray args) { threadId myThreadId = my_getThreadId(); bool hasArgsGlobalRef = false; // args needs to be global, you can't send env across thread boundaries DynamicProxyData* dynamicProxyData = (DynamicProxyData*)ptr; dynamicProxyData->args = args; dynamicProxyData->done = false; dynamicProxyData->result = NULL; dynamicProxyData->throwableClass = ""; dynamicProxyData->throwableMessage = ""; jclass methodClazz = env->FindClass("java/lang/reflect/Method"); jmethodID method_getName = env->GetMethodID(methodClazz, "getName", "()Ljava/lang/String;"); dynamicProxyData->methodName = javaObjectToString(env, env->CallObjectMethod(method, method_getName)); assertNoException(env); uv_work_t* req = new uv_work_t(); req->data = dynamicProxyData; if(v8ThreadIdEquals(myThreadId, v8ThreadId)) { #if NODE_MINOR_VERSION >= 10 EIO_AfterCallJs(req, 0); #else EIO_AfterCallJs(req); #endif } else { if (args) { // if args is not null and we have to kick this across the thread boundary, make it a global ref dynamicProxyData->args = (jobjectArray) env->NewGlobalRef(args); hasArgsGlobalRef = true; } uv_queue_work(uv_default_loop(), req, EIO_CallJs, (uv_after_work_cb)EIO_AfterCallJs); while(!dynamicProxyData->done) { my_sleep(100); } } if(!dynamicProxyDataVerify(dynamicProxyData)) { throwNewThrowable(env, "java/lang/IllegalStateException", "dynamicProxyData was corrupted"); } if(hasArgsGlobalRef) { env->DeleteGlobalRef(dynamicProxyData->args); } if (dynamicProxyData->done == DYNAMIC_PROXY_JS_ERROR) { throwNewThrowable(env, dynamicProxyData->throwableClass.c_str(), dynamicProxyData->throwableMessage); } jobject result = NULL; 
if(dynamicProxyData->result) { // need to retain a local ref so that we can return it, otherwise the returned object gets corrupted result = env->NewLocalRef(dynamicProxyData->result); env->DeleteGlobalRef(dynamicProxyData->result); } return result; } JNIEXPORT void JNICALL Java_node_NodeDynamicProxyClass_unref(JNIEnv *env, jobject src, jlong ptr) { DynamicProxyData* dynamicProxyData = (DynamicProxyData*)ptr; unref(dynamicProxyData); }
#include <cstdio>

// n: number of input values; d: number of rounds to satisfy.
// a[1..n]: the values, consumed in input order; sum: their total, which
// bounds the binary search; ans: largest feasible target found.
int n,d;
long long a[50010],sum,low,high,mid,ans;

// Feasibility check for target t: over d rounds, the carried-over amount h is
// halved at the start of each round (h >>= 1), then values a[j] are consumed
// greedily in order until h reaches t.  Returns true iff every round reaches
// at least t.  Monotone in t, which makes the binary search below valid.
bool judge(long long t)
{
    long long h=0;
    for (int i=1,j=1;i<=d;i++)
    {
        // carry-over decays by half between rounds
        h>>=1;
        while (h<t&&j<=n)
            h+=a[j++];
        // ran out of inputs before reaching the target
        if (h<t) return 0;
    }
    return 1;
}

// Reads test cases until EOF; for each, binary-searches the largest t in
// [0, sum] with judge(t) true and prints it.  judge(0) is always true, so
// ans is assigned at least once per case.
int main()
{
    while (scanf("%d%d",&n,&d)==2)
    {
        sum=0;
        for (int i=1;i<=n;i++)
        {
            scanf("%lld",&a[i]);
            sum+=a[i];
        }
        low=0; high=sum;
        while (low<=high)
        {
            mid=(low+high)>>1;
            if (judge(mid)) {ans=mid; low=mid+1;}
            else high=mid-1;
        }
        printf("%lld\n",ans);
    }
    return 0;
}
//========================================================================= // Copyright (C) 2012 The Elastos Open Source Project // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //========================================================================= #include "elastos/droid/text/CBidiFormatterHelper.h" #include "elastos/droid/text/CBidiFormatterBuilder.h" namespace Elastos { namespace Droid { namespace Text { CAR_INTERFACE_IMPL(CBidiFormatterHelper, Singleton, IBidiFormatterHelper) CAR_SINGLETON_IMPL(CBidiFormatterHelper) ECode CBidiFormatterHelper::GetInstance( /* [out] */ IBidiFormatter** instance) { VALIDATE_NOT_NULL(instance) AutoPtr<IBidiFormatterBuilder> builder; CBidiFormatterBuilder::New((IBidiFormatterBuilder**)&builder); return builder->Build(instance); } ECode CBidiFormatterHelper::GetInstance( /* [in] */ Boolean rtlContext, /* [out] */ IBidiFormatter** instance) { VALIDATE_NOT_NULL(instance) AutoPtr<IBidiFormatterBuilder> builder; CBidiFormatterBuilder::New(rtlContext, (IBidiFormatterBuilder**)&builder); return builder->Build(instance); } ECode CBidiFormatterHelper::GetInstance( /* [in] */ ILocale* locale, /* [out] */ IBidiFormatter** instance) { VALIDATE_NOT_NULL(instance) AutoPtr<IBidiFormatterBuilder> builder; CBidiFormatterBuilder::New(locale, (IBidiFormatterBuilder**)&builder); return builder->Build(instance); } }//namespace Text }//namespace Droid }//namespace Elastos
#include "editaddressdialog.h"
#include "ui_editaddressdialog.h"

#include "addresstablemodel.h"
#include "guiutil.h"

#include <QDataWidgetMapper>
#include <QMessageBox>

// Dialog for creating or editing an address-book entry.  The concrete
// behavior (title, editability, save path) is selected by the Mode value.
EditAddressDialog::EditAddressDialog(Mode mode, QWidget *parent) :
    QDialog(parent),
    ui(new Ui::EditAddressDialog), mapper(0), mode(mode), model(0)
{
    ui->setupUi(this);

    GUIUtil::setupAddressWidget(ui->addressEdit, this);

    // Pick the window title for the mode; the address of a receiving entry is
    // generated by the wallet and therefore never user-editable.
    if (mode == NewReceivingAddress)
    {
        setWindowTitle(tr("New receiving address"));
        ui->addressEdit->setEnabled(false);
    }
    else if (mode == NewSendingAddress)
    {
        setWindowTitle(tr("New sending address"));
    }
    else if (mode == EditReceivingAddress)
    {
        setWindowTitle(tr("Edit receiving address"));
        ui->addressEdit->setEnabled(false);
    }
    else if (mode == EditSendingAddress)
    {
        setWindowTitle(tr("Edit sending address"));
    }

    // Manual submit: widget contents are pushed to the model only on save.
    mapper = new QDataWidgetMapper(this);
    mapper->setSubmitPolicy(QDataWidgetMapper::ManualSubmit);
}

EditAddressDialog::~EditAddressDialog()
{
    // ui is owned here; mapper is parented to the dialog and cleaned up by Qt.
    delete ui;
}

// Attaches the address table model and wires the label/address widgets to
// their columns.
void EditAddressDialog::setModel(AddressTableModel *model)
{
    this->model = model;
    if(!model)
        return;

    mapper->setModel(model);
    mapper->addMapping(ui->labelEdit, AddressTableModel::Label);
    mapper->addMapping(ui->addressEdit, AddressTableModel::Address);
}

// Loads the given model row into the mapped widgets.
void EditAddressDialog::loadRow(int row)
{
    mapper->setCurrentIndex(row);
}

// Persists the dialog contents; returns true when an address was stored.
bool EditAddressDialog::saveCurrentRow()
{
    if(!model)
        return false;

    if (mode == NewReceivingAddress || mode == NewSendingAddress)
    {
        // New entry: addRow() returns the stored address, or "" on failure.
        if (mode == NewSendingAddress)
        {
            address = model->addRow(AddressTableModel::Send,
                ui->labelEdit->text(),
                ui->addressEdit->text());
        }
        else
        {
            address = model->addRow(AddressTableModel::Receive,
                ui->labelEdit->text(),
                ui->addressEdit->text());
        }
    }
    else
    {
        // Existing entry: push the mapped widget contents into the model.
        if (mapper->submit())
        {
            address = ui->addressEdit->text();
        }
    }
    return !address.isEmpty();
}

void EditAddressDialog::accept()
{
    if(!model)
        return;

    if (saveCurrentRow())
    {
        QDialog::accept();
        return;
    }

    // Save failed: report the reason (where one is known) and keep the
    // dialog open so the user can correct the input.
    switch(model->getEditStatus())
    {
    case AddressTableModel::OK:
        // Failed with unknown reason. Just reject.
        break;
    case AddressTableModel::NO_CHANGES:
        // No changes were made during edit operation. Just reject.
        break;
    case AddressTableModel::INVALID_ADDRESS:
        QMessageBox::warning(this, windowTitle(),
            tr("The entered address \"%1\" is not a valid OmniCoin address.").arg(ui->addressEdit->text()),
            QMessageBox::Ok, QMessageBox::Ok);
        break;
    case AddressTableModel::DUPLICATE_ADDRESS:
        QMessageBox::warning(this, windowTitle(),
            tr("The entered address \"%1\" is already in the address book.").arg(ui->addressEdit->text()),
            QMessageBox::Ok, QMessageBox::Ok);
        break;
    case AddressTableModel::WALLET_UNLOCK_FAILURE:
        QMessageBox::critical(this, windowTitle(),
            tr("Could not unlock wallet."),
            QMessageBox::Ok, QMessageBox::Ok);
        break;
    case AddressTableModel::KEY_GENERATION_FAILURE:
        QMessageBox::critical(this, windowTitle(),
            tr("New key generation failed."),
            QMessageBox::Ok, QMessageBox::Ok);
        break;
    }
}

// Address stored by the last successful save.
QString EditAddressDialog::getAddress() const
{
    return address;
}

// Pre-fills the address field (used when editing an existing entry).
void EditAddressDialog::setAddress(const QString &address)
{
    this->address = address;
    ui->addressEdit->setText(address);
}
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include <tuple> #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/StringRef.h" #include "llvm/Support/Casting.h" #include "mlir/IR/Attributes.h" // from @llvm-project #include "mlir/IR/Block.h" // from @llvm-project #include "mlir/IR/BuiltinOps.h" // from @llvm-project #include "mlir/IR/Operation.h" // from @llvm-project #include "mlir/IR/UseDefLists.h" // from @llvm-project #include "mlir/IR/Value.h" // from @llvm-project #include "mlir/Pass/Pass.h" // from @llvm-project #include "tensorflow/compiler/mlir/tensorflow/ir/tf_executor.h" #include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h" #include "tensorflow/compiler/mlir/tensorflow/utils/tpu_rewrite_device_util.h" namespace mlir { namespace TFTPU { namespace { constexpr char kDeviceAttr[] = "device"; constexpr char kFuncDeviceAttr[] = "tf.device"; // Checks if a function only contains a tf_executor.graph. 
bool IsSupportedGraph(FuncOp func) {
  // Function body must be a single block.
  if (!llvm::hasSingleElement(func)) return false;

  // That block must hold exactly one op besides the terminator...
  Block& block = func.front();
  if (!llvm::hasSingleElement(block.without_terminator())) return false;

  // ...and that op must be a tf_executor.graph.
  auto graph = llvm::dyn_cast<tf_executor::GraphOp>(block.front());
  if (!graph) return false;

  // The terminator must return exactly the graph's results, in order.
  Operation* terminator = block.getTerminator();
  if (graph.getNumResults() != terminator->getNumOperands()) return false;
  for (auto result : llvm::zip(graph.results(), terminator->getOperands()))
    if (std::get<0>(result) != std::get<1>(result)) return false;

  return true;
}

// Checks if an operation of the tf_executor dialect can have TPU devices
// propagated through.
bool IsSupportedExecutorOp(Operation& op) {
  // True when both ops carry the same device string, or neither carries one.
  auto ops_have_same_device = [](Operation* lhs, Operation* rhs) {
    auto lhs_device_attr = lhs->getAttrOfType<StringAttr>(kDeviceAttr);
    auto rhs_device_attr = rhs->getAttrOfType<StringAttr>(kDeviceAttr);
    return (!lhs_device_attr && !rhs_device_attr) ||
           (lhs_device_attr && rhs_device_attr &&
            lhs_device_attr.getValue() == rhs_device_attr.getValue());
  };

  // Check if tf_executor.NextIteration.Source/tf_executor.NextIteration.Sink
  // pair has matching devices or no devices.
  if (auto source = llvm::dyn_cast<tf_executor::NextIterationSourceOp>(op)) {
    return ops_have_same_device(source, source.GetSink());
  } else if (auto sink = llvm::dyn_cast<tf_executor::NextIterationSinkOp>(op)) {
    return ops_have_same_device(sink.GetSource(), sink);
  }

  return llvm::isa<tf_executor::EnterOp, tf_executor::ExitOp,
                   tf_executor::IslandOp, tf_executor::MergeOp,
                   tf_executor::SwitchOp>(op);
}

// Assigns all data results to a specified device.
void PopulateDeviceForOpResults(
    Operation& op, llvm::StringRef device,
    llvm::DenseMap<Value, llvm::StringRef>& value_to_device) {
  Operation* op_to_update = &op;
  // Use tf_executor.island op if present as non v1 control flow op results are
  // forwarded by a parent tf_executor.island op.
  if (llvm::isa<tf_executor::IslandOp>(op_to_update->getParentOp()))
    op_to_update = op_to_update->getParentOp();

  for (Value result : op_to_update->getResults()) {
    if (result.getType().isa<tf_executor::TokenType>()) continue;
    // Control results follow the data results, so stop at the first one.
    if (result.getType().isa<tf_executor::ControlType>()) break;

    value_to_device.insert({result, device});
  }
}

// Checks if an operation can have TPU devices propagated through.
bool IsSupportedOpToSetDevice(Operation& op) {
  return IsSupportedExecutorOp(op) ||
         isa<TF::IdentityOp, TF::IdentityNOp, TF::ShapeOp>(op);
}

// Finds nonconflicting TPU device for an operation from its operands. If an
// operand has no device or a non TPU device, or if there are conflicting
// devices, an empty StringRef will be returned. Control dependencies,
// NextIteration.Source -> NextIteration.Sink token dependencies, and
// LoopCond -> Switch data dependencies are ignored.
llvm::StringRef FindDeviceFromOperands(
    Operation& op,
    const llvm::DenseMap<Value, llvm::StringRef>& value_to_device) {
  llvm::StringRef new_device;
  const bool is_switch = llvm::isa<tf_executor::SwitchOp>(op);
  for (Value operand : op.getOperands()) {
    if (operand.getType().isa<tf_executor::TokenType>()) continue;
    // Control operands trail the data operands; stop at the first one.
    if (operand.getType().isa<tf_executor::ControlType>()) break;

    if (is_switch &&
        llvm::isa_and_nonnull<tf_executor::LoopCondOp>(operand.getDefiningOp()))
      continue;

    auto it = value_to_device.find(operand);
    // Operand with no recorded (TPU) device: give up.
    if (it == value_to_device.end()) return llvm::StringRef();

    if (new_device.empty()) {
      new_device = it->getSecond();
      continue;
    }

    // Conflicting devices across operands: give up.
    if (new_device != it->getSecond()) return llvm::StringRef();
  }
  return new_device;
}
// Seeds value_to_device with function arguments whose "tf.device" arg
// attribute names a TPU device; arguments without a TPU device are skipped.
void PropagateDevicesFromArguments(
    FuncOp func, llvm::DenseMap<Value, llvm::StringRef>& value_to_device) {
  for (BlockArgument& arg : func.getArguments()) {
    auto arg_device_attr =
        func.getArgAttrOfType<StringAttr>(arg.getArgNumber(), kFuncDeviceAttr);
    if (!arg_device_attr || arg_device_attr.getValue().empty() ||
        !tensorflow::IsTPUDevice(arg_device_attr.getValue()))
      continue;
    value_to_device.insert({arg, arg_device_attr.getValue()});
  }
}

// Propagates devices from operation operands to results. Updating the device of
// a tf_executor.NextIteration.Source/tf_executor.NextIteration.Sink will result
// in multiple passes over the tf_executor.graph to propagate devices in loops.
void PropagateDevicesInGraph(
    tf_executor::GraphOp graph,
    llvm::DenseMap<Value, llvm::StringRef>& value_to_device) {
  auto ops = graph.GetBody().without_terminator();

  // Iterate to a fixed point: assigning a device to a NextIteration.Source
  // can enable further propagation on an earlier op in the next sweep.
  bool updated_next_iteration = false;
  do {
    updated_next_iteration = false;
    for (Operation& op : ops) {
      if (!IsSupportedExecutorOp(op)) continue;

      Operation* op_to_update = &op;
      // Unpack inner op of tf_executor.island.
      if (auto island_op =
              llvm::dyn_cast<tf_executor::IslandOp>(op_to_update)) {
        if (!island_op.WrapsSingleOp()) continue;
        op_to_update = &island_op.GetBody().front();
      }

      // If op already has a TPU device set, simply propagate its device.
      auto device_attr = op_to_update->getAttrOfType<StringAttr>(kDeviceAttr);
      const bool has_device = device_attr && !device_attr.getValue().empty();
      if (has_device && tensorflow::IsTPUDevice(device_attr.getValue())) {
        PopulateDeviceForOpResults(*op_to_update, device_attr.getValue(),
                                   value_to_device);
        continue;
      }

      // Op has an unsupported device.
      if (has_device) continue;

      if (!IsSupportedOpToSetDevice(*op_to_update)) continue;

      llvm::StringRef new_device =
          FindDeviceFromOperands(*op_to_update, value_to_device);
      if (new_device.empty()) continue;

      auto new_device_attr =
          mlir::StringAttr::get(op_to_update->getContext(), new_device);
      op_to_update->setAttr(kDeviceAttr, new_device_attr);
      PopulateDeviceForOpResults(*op_to_update, new_device_attr.getValue(),
                                 value_to_device);

      // Mirror the device onto the paired NextIteration.Source and request
      // another sweep so loop bodies pick it up.
      if (auto sink =
              llvm::dyn_cast<tf_executor::NextIterationSinkOp>(op_to_update)) {
        auto source = sink.GetSource();
        source->setAttr(kDeviceAttr, new_device_attr);
        PopulateDeviceForOpResults(*source, new_device_attr.getValue(),
                                   value_to_device);
        updated_next_iteration = true;
      }
    }
  } while (updated_next_iteration);
}

// Propagates devices to function results.
void PropagateDevicesToResults(
    FuncOp func, tf_executor::FetchOp fetch,
    const llvm::DenseMap<Value, llvm::StringRef>& value_to_device) {
  for (OpOperand& operand : fetch.getOperation()->getOpOperands()) {
    // Control operands trail the data operands; stop at the first one.
    if (operand.get().getType().isa<tf_executor::ControlType>()) break;
    auto it = value_to_device.find(operand.get());
    if (it != value_to_device.end()) {
      // Do not overwrite an explicitly set result device attribute.
      auto device_attr = func.getResultAttrOfType<StringAttr>(
          operand.getOperandNumber(), kFuncDeviceAttr);
      if (device_attr && !device_attr.getValue().empty()) continue;
      func.setResultAttr(operand.getOperandNumber(), kFuncDeviceAttr,
                         StringAttr::get(func.getContext(), it->getSecond()));
    }
  }
}

// Function pass that forward-propagates TPU device assignments through
// supported tf_executor ops, from arguments to results.
struct TPUDevicePropagation
    : public PassWrapper<TPUDevicePropagation, FunctionPass> {
  void runOnFunction() override;
  StringRef getArgument() const final {
    // This is the argument used to refer to the pass in
    // the textual format (on the commandline for example).
    return "tf-tpu-device-propagation";
  }
  StringRef getDescription() const final {
    // This is a brief description of the pass.
    return "Propagates TPU devices from ops to users";
  }
};

void TPUDevicePropagation::runOnFunction() {
  FuncOp func = getFunction();
  // Only handle the canonical single tf_executor.graph shape.
  if (!IsSupportedGraph(func)) return;

  llvm::DenseMap<Value, llvm::StringRef> value_to_device;
  PropagateDevicesFromArguments(func, value_to_device);
  auto graph = llvm::cast<tf_executor::GraphOp>(func.front().front());
  PropagateDevicesInGraph(graph, value_to_device);
  PropagateDevicesToResults(func, graph.GetFetch(), value_to_device);
}

}  // namespace

std::unique_ptr<OperationPass<FuncOp>> CreateTPUDevicePropagationPass() {
  return std::make_unique<TPUDevicePropagation>();
}

static PassRegistration<TPUDevicePropagation> pass;

}  // namespace TFTPU
}  // namespace mlir
#include "stdafx.h"
#include "TrEmailAdd.h"
#include "Registry.h"
#include "SmtpAuthDlg.h"

// Add-mode constructor: all fields start empty, SMTP port defaults to 25,
// authentication and SSL are off.
TrEmailAdd::TrEmailAdd(CWnd* pParent)
  :CDialog(TrEmailAdd::IDD, pParent),
   mInChangeMode(FALSE)
{
  //{{AFX_DATA_INIT(TrEmailAdd)
  mFrom = _T("");
  mHost = _T("");
  mMessage = _T("");
  mPort = 25;
  mSubject = _T("");
  mTo = _T("");
  mUser = _T("");
  mJuniper = _T("");   // mJuniper holds the stored SMTP password
  mAuthNeeded = FALSE;
  usessl = FALSE;
  //}}AFX_DATA_INIT
}

// Edit-mode constructor: pre-populates the dialog with an existing entry and
// switches the OK button to "Update" (see OnInitDialog).
TrEmailAdd::TrEmailAdd(CString host, CString to, CString from, int port, CString subject,
                       CString message, BOOL needAuth, CString user, CString juniper, BOOL inUsessl)
  :CDialog(TrEmailAdd::IDD, NULL),
   mInChangeMode(TRUE),
   mHost(host),
   mTo(to),
   mFrom(from),
   mPort(port),
   mSubject(subject),
   mMessage(message),
   mAuthNeeded(needAuth),
   mUser(user),
   mJuniper(juniper),
   usessl(inUsessl)
{
}

// Standard MFC DDX/DDV: binds controls to members and enforces length/range
// limits on each field.
void TrEmailAdd::DoDataExchange(CDataExchange* pDX)
{
  CDialog::DoDataExchange(pDX);
  //{{AFX_DATA_MAP(TrEmailAdd)
  DDX_Control(pDX, IDC_BUTTONSETAUTH, mAuthButton);
  DDX_Control(pDX, IDOK, mAddUpdateButton);
  DDX_Text(pDX, IDC_EDITFROM, mFrom);
  DDV_MaxChars(pDX, mFrom, 60);
  DDX_Text(pDX, IDC_EDITSMTPSERVER, mHost);
  DDV_MaxChars(pDX, mHost, 30);
  DDX_Text(pDX, IDC_EDITMESSAGE, mMessage);
  DDV_MaxChars(pDX, mMessage, 512);
  DDX_Text(pDX, IDC_EDITSMTPPORT, mPort);
  DDV_MinMaxInt(pDX, mPort, 0, 65535);
  DDX_Text(pDX, IDC_EDITSUBJECT, mSubject);
  DDV_MaxChars(pDX, mSubject, 255);
  DDX_Text(pDX, IDC_EDITTO, mTo);
  DDV_MaxChars(pDX, mTo, 60);
  DDX_Check(pDX, IDC_CHECKAUTHREQUIRED, mAuthNeeded);
  DDX_Check(pDX, IDC_CHECKSSLREQUIRED, usessl);
  //}}AFX_DATA_MAP
}

BEGIN_MESSAGE_MAP(TrEmailAdd, CDialog)
  //{{AFX_MSG_MAP(TrEmailAdd)
  ON_BN_CLICKED(IDC_BUTTONSETAUTH, OnButtonSetAuth)
  ON_BN_CLICKED(IDC_CHECKAUTHREQUIRED, OnCheckAuthRequired)
  //}}AFX_MSG_MAP
END_MESSAGE_MAP()

// Labels the OK button according to add/edit mode and enables the auth
// settings button only when authentication is requested.
BOOL TrEmailAdd::OnInitDialog()
{
  CDialog::OnInitDialog();

  if (mInChangeMode)
    mAddUpdateButton.SetWindowText("Update");
  else
    mAddUpdateButton.SetWindowText("Add");

  mAuthButton.EnableWindow(mAuthNeeded);

  return TRUE;
}

void TrEmailAdd::OnCancel()
{
  CDialog::OnCancel();
}

// Validates host/recipient, rejects duplicates when adding, then accepts.
void TrEmailAdd::OnOK()
{
  UpdateData(TRUE);

  if (mHost.GetLength() == 0)
  {
    AfxMessageBox("Incorrect host specified", MB_OK | MB_ICONEXCLAMATION);
    return;
  }
  else if (mTo.GetLength() == 0)
  {
    // Fixed typo in user-facing message ("recepient" -> "recipient").
    AfxMessageBox("No recipient specified", MB_OK | MB_ICONEXCLAMATION);
    return;
  }
  else
  {
    // Duplicate check applies only when adding a new entry.
    if (!mInChangeMode)
    {
      int dummy;
      try
      {
        Registry registry("Email");
        if (registry.find(mHost, mFrom, mTo, mSubject, dummy))
        {
          AfxMessageBox("Duplicate Entry", MB_OK | MB_ICONEXCLAMATION);
          return;
        }
      }
      catch(...)
      {
        // Best-effort: if the registry lookup fails we proceed without the
        // duplicate check rather than blocking the user.
      }
    }
  }

  CDialog::OnOK();
}

// Opens the SMTP authentication sub-dialog; in edit mode it is seeded with
// the current credentials.  Values are only stored when the user hits OK.
void TrEmailAdd::OnButtonSetAuth()
{
  int result = IDCANCEL;
  SmtpAuthDlg* dlg;
  if (mInChangeMode)
    dlg = new SmtpAuthDlg(mUser, mJuniper);
  else
    dlg = new SmtpAuthDlg;

  result = dlg->DoModal();
  if (result == IDOK)
  {
    mUser = dlg->User();
    mJuniper = dlg->Password();
  }
  delete dlg;
}

// Keeps the auth settings button in sync with the "auth required" checkbox.
void TrEmailAdd::OnCheckAuthRequired()
{
  UpdateData(TRUE);
  mAuthButton.EnableWindow(mAuthNeeded);
}
// // Copyright (c) 2021-2022 the rbfx project. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. 
// #include "../Span.h" #include "../ParticleGraphLayerInstance.h" #include "../UpdateContext.h" #include "../../Precompiled.h" #include "Box.h" #include "BoxInstance.h" #include "../ParticleGraphSystem.h" namespace Urho3D { namespace ParticleGraphNodes { void Box::RegisterObject(ParticleGraphSystem* context) { context->AddReflection<Box>(); URHO3D_ACCESSOR_ATTRIBUTE("Box Thickness", GetBoxThickness, SetBoxThickness, Vector3, Vector3{}, AM_DEFAULT); URHO3D_ACCESSOR_ATTRIBUTE("Translation", GetTranslation, SetTranslation, Vector3, Vector3{}, AM_DEFAULT); URHO3D_ACCESSOR_ATTRIBUTE("Rotation", GetRotation, SetRotation, Quaternion, Quaternion{}, AM_DEFAULT); URHO3D_ACCESSOR_ATTRIBUTE("Scale", GetScale, SetScale, Vector3, Vector3{}, AM_DEFAULT); URHO3D_ACCESSOR_ATTRIBUTE("From", GetFrom, SetFrom, int, int{}, AM_DEFAULT); } Box::Box(Context* context) : BaseNodeType(context , PinArray { ParticleGraphPin(ParticleGraphPinFlag::Output, "position", ParticleGraphContainerType::Span), ParticleGraphPin(ParticleGraphPinFlag::Output, "velocity", ParticleGraphContainerType::Span), }) { } /// Evaluate size required to place new node instance. unsigned Box::EvaluateInstanceSize() const { return sizeof(BoxInstance); } /// Place new instance at the provided address. 
ParticleGraphNodeInstance* Box::CreateInstanceAt(void* ptr, ParticleGraphLayerInstance* layer) { BoxInstance* instance = new (ptr) BoxInstance(); instance->Init(this, layer); return instance; } void Box::SetBoxThickness(Vector3 value) { boxThickness_ = value; } Vector3 Box::GetBoxThickness() const { return boxThickness_; } void Box::SetTranslation(Vector3 value) { translation_ = value; } Vector3 Box::GetTranslation() const { return translation_; } void Box::SetRotation(Quaternion value) { rotation_ = value; } Quaternion Box::GetRotation() const { return rotation_; } void Box::SetScale(Vector3 value) { scale_ = value; } Vector3 Box::GetScale() const { return scale_; } void Box::SetFrom(int value) { from_ = value; } int Box::GetFrom() const { return from_; } } // namespace ParticleGraphNodes } // namespace Urho3D
/* Copyright 2013 Adobe Distributed under the Boost Software License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) */ /*************************************************************************************************/ #ifndef ADOBE_ALGORITHM_SORTED_HPP #define ADOBE_ALGORITHM_SORTED_HPP #include <algorithm> #include <functional> #include <iterator> #include <boost/range/begin.hpp> #include <boost/range/end.hpp> #include <adobe/functional/operator.hpp> /*************************************************************************************************/ namespace adobe { /*************************************************************************************************/ /*! \defgroup sorted sorted \ingroup sorting */ /*! \ingroup sorted */ template <typename I, // I models InputIterator typename O> // O models StrictWeakOrdering on value_type(I) I sorted(I f, I l, O o) { f = std::adjacent_find(f, l, std::bind(std::logical_not<bool>(), std::bind(o, std::placeholders::_1, std::placeholders::_2))); if (f != l) ++f; return f; } /*************************************************************************************************/ /*! \ingroup sorted */ template <typename I> // I models InputIterator, value_type(I) models LessThanComparable I sorted(I f, I l) { return sorted(f, l, less()); } /*************************************************************************************************/ /*! \ingroup sorted */ template <typename I, // I models InputIterator typename O> // O models StrictWeakOrdering on value_type(I) inline bool is_sorted(I f, I l, O o) { return std::adjacent_find(f, l, std::bind(std::logical_not<bool>(), std::bind(o, std::placeholders::_1, std::placeholders::_2))) == l; } /*************************************************************************************************/ /*! 
\ingroup sorted
*/
// True iff [f, l) is sorted under the default less-than ordering.
template <typename I> // I models InputIterator, value_type(I) models LessThanComparable
inline bool is_sorted(I f, I l) {
    return is_sorted(f, l, less());
}

/*************************************************************************************************/

/*!
\ingroup sorted
*/
// True iff [f, l) is sorted under ordering c applied to the projection p of
// each element (compares c(p(a), p(b)) for adjacent elements).
template <typename I, // I models ForwardIterator
          typename C, // C models StrictWeakOrdering(T, T)
          typename P> // P models UnaryFunction(value_type(I)) -> T
inline bool is_sorted(I f, I l, C c, P p) {
    return std::adjacent_find(f, l,
                              std::bind(std::logical_not<bool>(),
                                        std::bind(c, std::bind(p, std::placeholders::_1),
                                                  std::bind(p, std::placeholders::_2)))) == l;
}

/*************************************************************************************************/

/*!
\ingroup sorted
*/
// Range overload: forwards to the iterator/comparator/projection form.
template <typename I, // I models ForwardRange
          typename C, // C models StrictWeakOrdering(T, T)
          typename P> // P models UnaryFunction(value_type(I)) -> T
inline bool is_sorted(const I& r, C c, P p) {
    return is_sorted(boost::begin(r), boost::end(r), c, p);
}

/*************************************************************************************************/

/*!
\ingroup sorted
*/
// Range overload with identity projection.
// NOTE(review): `std::iterator_traits<I>::value_type` is instantiated with the
// *range* type I, not its iterator type — this looks like it would only compile
// for iterator-like ranges. Presumably boost::range_value<I>::type was
// intended; confirm against callers before changing.
template <typename I, // I models ForwardRange
          typename C> // C models StrictWeakOrdering(T, T)
inline bool is_sorted(const I& r, C c) {
    return is_sorted(boost::begin(r), boost::end(r), c,
                     identity<typename std::iterator_traits<I>::value_type>());
}

/*************************************************************************************************/

/*!
\ingroup sorted */ template <typename I> // I models ForwardRange inline bool is_sorted(const I& r) { return is_sorted(boost::begin(r), boost::end(r), less()); } /*************************************************************************************************/ } // namespace adobe /*************************************************************************************************/ #endif /*************************************************************************************************/
// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.

#include "db/external_sst_file_ingestion_job.h"

#define __STDC_FORMAT_MACROS

#include <inttypes.h>
#include <algorithm>
#include <string>
#include <vector>

#include "db/version_edit.h"
#include "table/merging_iterator.h"
#include "table/scoped_arena_iterator.h"
#include "table/sst_file_writer_collectors.h"
#include "table/table_builder.h"
#include "util/file_reader_writer.h"
#include "util/file_util.h"
#include "util/stop_watch.h"
#include "util/sync_point.h"

namespace rocksdb {

// Validates the external files (matching column family, non-empty,
// non-overlapping key ranges, valid keys) and copies/links them into the DB
// directory. On any copy failure, files already brought in are deleted again.
Status ExternalSstFileIngestionJob::Prepare(
    const std::vector<std::string>& external_files_paths) {
  Status status;
  // Read the information of files we are ingesting
  for (const std::string& file_path : external_files_paths) {
    IngestedFileInfo file_to_ingest;
    status = GetIngestedFileInfo(file_path, &file_to_ingest);
    if (!status.ok()) {
      return status;
    }
    files_to_ingest_.push_back(file_to_ingest);
  }

  // A file written for a specific column family may only be ingested into
  // that column family (kUnknownColumnFamily means "any").
  for (const IngestedFileInfo& f : files_to_ingest_) {
    if (f.cf_id !=
            TablePropertiesCollectorFactory::Context::kUnknownColumnFamily &&
        f.cf_id != cfd_->GetID()) {
      return Status::InvalidArgument(
          "External file column family id dont match");
    }
  }

  const Comparator* ucmp = cfd_->internal_comparator().user_comparator();
  auto num_files = files_to_ingest_.size();
  if (num_files == 0) {
    return Status::InvalidArgument("The list of files is empty");
  } else if (num_files > 1) {
    // Verify that passed files dont have overlapping ranges
    autovector<const IngestedFileInfo*> sorted_files;
    for (size_t i = 0; i < num_files; i++) {
      sorted_files.push_back(&files_to_ingest_[i]);
    }

    // Sort by smallest user key, then check each adjacent pair for overlap.
    std::sort(
        sorted_files.begin(), sorted_files.end(),
        [&ucmp](const IngestedFileInfo* info1, const IngestedFileInfo* info2) {
          return ucmp->Compare(info1->smallest_user_key,
                               info2->smallest_user_key) < 0;
        });

    for (size_t i = 0; i < num_files - 1; i++) {
      if (ucmp->Compare(sorted_files[i]->largest_user_key,
                        sorted_files[i + 1]->smallest_user_key) >= 0) {
        return Status::NotSupported("Files have overlapping ranges");
      }
    }
  }

  for (IngestedFileInfo& f : files_to_ingest_) {
    if (f.num_entries == 0) {
      return Status::InvalidArgument("File contain no entries");
    }
    if (!f.smallest_internal_key().Valid() ||
        !f.largest_internal_key().Valid()) {
      return Status::Corruption("Generated table have corrupted keys");
    }
  }

  // Copy/Move external files into DB
  for (IngestedFileInfo& f : files_to_ingest_) {
    f.fd = FileDescriptor(versions_->NewFileNumber(), 0, f.file_size);
    const std::string path_outside_db = f.external_file_path;
    const std::string path_inside_db =
        TableFileName(db_options_.db_paths, f.fd.GetNumber(), f.fd.GetPathId());
    if (ingestion_options_.move_files) {
      status = env_->LinkFile(path_outside_db, path_inside_db);
      if (status.IsNotSupported()) {
        // Original file is on a different FS, use copy instead of hard linking
        status = CopyFile(env_, path_outside_db, path_inside_db, 0,
                          db_options_.use_fsync);
      }
    } else {
      status = CopyFile(env_, path_outside_db, path_inside_db, 0,
                        db_options_.use_fsync);
    }
    TEST_SYNC_POINT("DBImpl::AddFile:FileCopied");
    if (!status.ok()) {
      break;
    }
    f.internal_file_path = path_inside_db;
  }

  if (!status.ok()) {
    // We failed, remove all files that we copied into the db.
    // internal_file_path is set in copy order, so the first empty path marks
    // the point past which nothing was copied.
    for (IngestedFileInfo& f : files_to_ingest_) {
      if (f.internal_file_path == "") {
        break;
      }
      Status s = env_->DeleteFile(f.internal_file_path);
      if (!s.ok()) {
        Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log,
            "AddFile() clean up for file %s failed : %s",
            f.internal_file_path.c_str(), s.ToString().c_str());
      }
    }
  }

  return status;
}

// Sets *flush_needed if any ingested file overlaps the memtables. Returns
// InvalidArgument when a flush is needed but blocking flushes are disallowed.
Status ExternalSstFileIngestionJob::NeedsFlush(bool* flush_needed) {
  SuperVersion* super_version = cfd_->GetSuperVersion();
  Status status =
      IngestedFilesOverlapWithMemtables(super_version, flush_needed);
  if (status.ok() && *flush_needed &&
      !ingestion_options_.allow_blocking_flush) {
    status = Status::InvalidArgument("External file requires flush");
  }
  return status;
}

// Assigns a target level and global sequence number to every file and records
// them in the VersionEdit. Consumes at most one sequence number in total.
Status ExternalSstFileIngestionJob::Run() {
  Status status;
#ifndef NDEBUG
  // We should never run the job with a memtable that is overlapping
  // with the files we are ingesting
  bool need_flush = false;
  status = NeedsFlush(&need_flush);
  assert(status.ok() && need_flush == false);
#endif

  bool consumed_seqno = false;
  bool force_global_seqno = false;
  const SequenceNumber last_seqno = versions_->LastSequence();
  if (ingestion_options_.snapshot_consistency && !db_snapshots_->empty()) {
    // We need to assign a global sequence number to all the files even
    // if the dont overlap with any ranges since we have snapshots
    force_global_seqno = true;
  }

  SuperVersion* super_version = cfd_->GetSuperVersion();
  edit_.SetColumnFamily(cfd_->GetID());
  // The levels that the files will be ingested into
  for (IngestedFileInfo& f : files_to_ingest_) {
    bool overlap_with_db = false;
    status = AssignLevelForIngestedFile(super_version, &f, &overlap_with_db);
    if (!status.ok()) {
      return status;
    }
    if (overlap_with_db || force_global_seqno) {
      // Overlapping (or snapshot-visible) files need a fresh seqno so their
      // keys win over existing ones; all files share last_seqno + 1.
      status = AssignGlobalSeqnoForIngestedFile(&f, last_seqno + 1);
      consumed_seqno = true;
    } else {
      status = AssignGlobalSeqnoForIngestedFile(&f, 0);
    }
    if (!status.ok()) {
      return status;
    }
    edit_.AddFile(f.picked_level, f.fd.GetNumber(), f.fd.GetPathId(),
                  f.fd.GetFileSize(), f.smallest_internal_key(),
                  f.largest_internal_key(), f.assigned_seqno, f.assigned_seqno,
                  false);
  }

  if (consumed_seqno) {
    versions_->SetLastSequence(last_seqno + 1);
  }

  return status;
}

// Records per-file compaction stats and aggregate ingestion counters, and logs
// one INFO line per ingested file.
void ExternalSstFileIngestionJob::UpdateStats() {
  // Update internal stats for new ingested files
  uint64_t total_keys = 0;
  uint64_t total_l0_files = 0;
  uint64_t total_time = env_->NowMicros() - job_start_time_;
  for (IngestedFileInfo& f : files_to_ingest_) {
    InternalStats::CompactionStats stats(1);
    stats.micros = total_time;
    stats.bytes_written = f.fd.GetFileSize();
    stats.num_output_files = 1;
    cfd_->internal_stats()->AddCompactionStats(f.picked_level, stats);
    cfd_->internal_stats()->AddCFStats(InternalStats::BYTES_INGESTED_ADD_FILE,
                                       f.fd.GetFileSize());
    total_keys += f.num_entries;
    if (f.picked_level == 0) {
      total_l0_files += 1;
    }
    Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
        "[AddFile] External SST file %s was ingested in L%d with path %s "
        "(global_seqno=%" PRIu64 ")\n",
        f.external_file_path.c_str(), f.picked_level,
        f.internal_file_path.c_str(), f.assigned_seqno);
  }
  cfd_->internal_stats()->AddCFStats(InternalStats::INGESTED_NUM_KEYS_TOTAL,
                                     total_keys);
  cfd_->internal_stats()->AddCFStats(InternalStats::INGESTED_NUM_FILES_TOTAL,
                                     files_to_ingest_.size());
  cfd_->internal_stats()->AddCFStats(
      InternalStats::INGESTED_LEVEL0_NUM_FILES_TOTAL, total_l0_files);
}

// On failure: deletes the files copied into the DB. On success with
// move_files: deletes the original external files (best effort, logged).
void ExternalSstFileIngestionJob::Cleanup(const Status& status) {
  if (!status.ok()) {
    // We failed to add the files to the database
    // remove all the files we copied
    for (IngestedFileInfo& f : files_to_ingest_) {
      Status s = env_->DeleteFile(f.internal_file_path);
      if (!s.ok()) {
        Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log,
            "AddFile() clean up for file %s failed : %s",
            f.internal_file_path.c_str(), s.ToString().c_str());
      }
    }
  } else if (status.ok() && ingestion_options_.move_files) {
    // The files were moved and added successfully, remove original file links
    for (IngestedFileInfo& f : files_to_ingest_) {
      Status s = env_->DeleteFile(f.external_file_path);
      if (!s.ok()) {
        Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log,
            "%s was added to DB successfully but failed to remove original "
            "file link : %s",
            f.external_file_path.c_str(), s.ToString().c_str());
      }
    }
  }
}

// Opens the external file, reads its table properties (format version, global
// seqno field for V2 files) and scans for its smallest/largest user keys.
Status ExternalSstFileIngestionJob::GetIngestedFileInfo(
    const std::string& external_file, IngestedFileInfo* file_to_ingest) {
  file_to_ingest->external_file_path = external_file;

  // Get external file size
  Status status = env_->GetFileSize(external_file, &file_to_ingest->file_size);
  if (!status.ok()) {
    return status;
  }

  // Create TableReader for external file
  std::unique_ptr<TableReader> table_reader;
  std::unique_ptr<RandomAccessFile> sst_file;
  std::unique_ptr<RandomAccessFileReader> sst_file_reader;

  status = env_->NewRandomAccessFile(external_file, &sst_file, env_options_);
  if (!status.ok()) {
    return status;
  }
  sst_file_reader.reset(new RandomAccessFileReader(std::move(sst_file)));

  status = cfd_->ioptions()->table_factory->NewTableReader(
      TableReaderOptions(*cfd_->ioptions(), env_options_,
                         cfd_->internal_comparator()),
      std::move(sst_file_reader), file_to_ingest->file_size, &table_reader);
  if (!status.ok()) {
    return status;
  }

  // Get the external file properties
  auto props = table_reader->GetTableProperties();
  const auto& uprops = props->user_collected_properties;

  // Get table version
  auto version_iter = uprops.find(ExternalSstFilePropertyNames::kVersion);
  if (version_iter == uprops.end()) {
    return Status::Corruption("External file version not found");
  }
  file_to_ingest->version = DecodeFixed32(version_iter->second.c_str());

  auto seqno_iter = uprops.find(ExternalSstFilePropertyNames::kGlobalSeqno);
  if (file_to_ingest->version == 2) {
    // version 2 imply that we have global sequence number
    if (seqno_iter == uprops.end()) {
      return Status::Corruption(
          "External file global sequence number not found");
    }

    // Set the global sequence number
    file_to_ingest->original_seqno = DecodeFixed64(seqno_iter->second.c_str());
    file_to_ingest->global_seqno_offset = props->properties_offsets.at(
        ExternalSstFilePropertyNames::kGlobalSeqno);

    if (file_to_ingest->global_seqno_offset == 0) {
      return Status::Corruption("Was not able to find file global seqno field");
    }
  } else if (file_to_ingest->version == 1) {
    // SST file V1 should not have global seqno field
    assert(seqno_iter == uprops.end());
    if (ingestion_options_.allow_blocking_flush ||
        ingestion_options_.allow_global_seqno) {
      return Status::InvalidArgument(
          "External SST file V1 does not support global seqno");
    }
  } else {
    return Status::InvalidArgument("External file version is not supported");
  }
  // Get number of entries in table
  file_to_ingest->num_entries = props->num_entries;

  ParsedInternalKey key;
  ReadOptions ro;
  // During reading the external file we can cache blocks that we read into
  // the block cache, if we later change the global seqno of this file, we will
  // have block in cache that will include keys with wrong seqno.
  // We need to disable fill_cache so that we read from the file without
  // updating the block cache.
  ro.fill_cache = false;
  std::unique_ptr<InternalIterator> iter(table_reader->NewIterator(ro));

  // Get first (smallest) key from file
  iter->SeekToFirst();
  if (!ParseInternalKey(iter->key(), &key)) {
    return Status::Corruption("external file have corrupted keys");
  }
  if (key.sequence != 0) {
    return Status::Corruption("external file have non zero sequence number");
  }
  file_to_ingest->smallest_user_key = key.user_key.ToString();

  // Get last (largest) key from file
  iter->SeekToLast();
  if (!ParseInternalKey(iter->key(), &key)) {
    return Status::Corruption("external file have corrupted keys");
  }
  if (key.sequence != 0) {
    return Status::Corruption("external file have non zero sequence number");
  }
  file_to_ingest->largest_user_key = key.user_key.ToString();

  file_to_ingest->cf_id = static_cast<uint32_t>(props->column_family_id);
  file_to_ingest->table_properties = *props;

  return status;
}

// Sets *overlap if any ingested file's key range intersects the active or
// immutable memtables of the given SuperVersion.
Status ExternalSstFileIngestionJob::IngestedFilesOverlapWithMemtables(
    SuperVersion* sv, bool* overlap) {
  // Create an InternalIterator over all memtables
  Arena arena;
  ReadOptions ro;
  ro.total_order_seek = true;
  MergeIteratorBuilder merge_iter_builder(&cfd_->internal_comparator(),
                                          &arena);
  merge_iter_builder.AddIterator(sv->mem->NewIterator(ro, &arena));
  sv->imm->AddIterators(ro, &merge_iter_builder);
  ScopedArenaIterator memtable_iter(merge_iter_builder.Finish());

  Status status;
  *overlap = false;
  for (IngestedFileInfo& f : files_to_ingest_) {
    status =
        IngestedFileOverlapWithIteratorRange(&f, memtable_iter.get(), overlap);
    if (!status.ok() || *overlap == true) {
      break;
    }
  }

  return status;
}

// Picks the lowest level the file can be placed in without overlapping
// existing keys; sets *overlap_with_db when an overlap forces a higher level
// (and therefore a fresh global seqno).
Status ExternalSstFileIngestionJob::AssignLevelForIngestedFile(
    SuperVersion* sv, IngestedFileInfo* file_to_ingest, bool* overlap_with_db) {
  *overlap_with_db = false;
  Arena arena;
  ReadOptions ro;
  ro.total_order_seek = true;

  Status status;
  int target_level = 0;
  auto* vstorage = cfd_->current()->storage_info();
  for (int lvl = 0; lvl < cfd_->NumberLevels(); lvl++) {
    // Levels between 0 and base_level are empty in level_compaction_dynamic
    // setups; skip them.
    if (lvl > 0 && lvl < vstorage->base_level()) {
      continue;
    }

    if (vstorage->NumLevelFiles(lvl) > 0) {
      bool overlap_with_level = false;
      MergeIteratorBuilder merge_iter_builder(&cfd_->internal_comparator(),
                                              &arena);
      RangeDelAggregator range_del_agg(cfd_->internal_comparator(),
                                       {} /* snapshots */);
      sv->current->AddIteratorsForLevel(ro, env_options_, &merge_iter_builder,
                                        lvl, &range_del_agg);
      if (!range_del_agg.IsEmpty()) {
        return Status::NotSupported(
            "file ingestion with range tombstones is currently unsupported");
      }
      ScopedArenaIterator level_iter(merge_iter_builder.Finish());

      status = IngestedFileOverlapWithIteratorRange(
          file_to_ingest, level_iter.get(), &overlap_with_level);
      if (!status.ok()) {
        return status;
      }

      if (overlap_with_level) {
        // We must use L0 or any level higher than `lvl` to be able to overwrite
        // the keys that we overlap with in this level, We also need to assign
        // this file a seqno to overwrite the existing keys in level `lvl`
        *overlap_with_db = true;
        break;
      }
    }

    // We dont overlap with any keys in this level, but we still need to check
    // if our file can fit in it
    if (IngestedFileFitInLevel(file_to_ingest, lvl)) {
      target_level = lvl;
    }
  }
  file_to_ingest->picked_level = target_level;
  return status;
}

// Rewrites the global-seqno field inside the already-copied file (in place)
// and records the assigned seqno. No-op if the file already carries `seqno`.
Status ExternalSstFileIngestionJob::AssignGlobalSeqnoForIngestedFile(
    IngestedFileInfo* file_to_ingest, SequenceNumber seqno) {
  if (file_to_ingest->original_seqno == seqno) {
    // This file already have the correct global seqno
    return Status::OK();
  } else if (!ingestion_options_.allow_global_seqno) {
    return Status::InvalidArgument("Global seqno is required, but disabled");
  } else if (file_to_ingest->global_seqno_offset == 0) {
    return Status::InvalidArgument(
        "Trying to set global seqno for a file that dont have a global seqno "
        "field");
  }

  std::unique_ptr<RandomRWFile> rwfile;
  Status status = env_->NewRandomRWFile(file_to_ingest->internal_file_path,
                                        &rwfile, env_options_);
  if (!status.ok()) {
    return status;
  }

  // Write the new seqno in the global sequence number field in the file
  std::string seqno_val;
  PutFixed64(&seqno_val, seqno);
  status = rwfile->Write(file_to_ingest->global_seqno_offset, seqno_val);
  if (status.ok()) {
    file_to_ingest->assigned_seqno = seqno;
  }
  return status;
}

// Seeks the iterator to the file's smallest key and sets *overlap if the
// first entry found is <= the file's largest key.
Status ExternalSstFileIngestionJob::IngestedFileOverlapWithIteratorRange(
    const IngestedFileInfo* file_to_ingest, InternalIterator* iter,
    bool* overlap) {
  auto* vstorage = cfd_->current()->storage_info();
  auto* ucmp = vstorage->InternalComparator()->user_comparator();
  InternalKey range_start(file_to_ingest->smallest_user_key, kMaxSequenceNumber,
                          kValueTypeForSeek);
  iter->Seek(range_start.Encode());
  if (!iter->status().ok()) {
    return iter->status();
  }

  *overlap = false;
  if (iter->Valid()) {
    ParsedInternalKey seek_result;
    if (!ParseInternalKey(iter->key(), &seek_result)) {
      return Status::Corruption("DB have corrupted keys");
    }

    if (ucmp->Compare(seek_result.user_key, file_to_ingest->largest_user_key) <=
        0) {
      *overlap = true;
    }
  }

  return iter->status();
}

// A file fits in a level if it overlaps neither that level's files nor any
// running compaction that outputs into that level. L0 always fits.
bool ExternalSstFileIngestionJob::IngestedFileFitInLevel(
    const IngestedFileInfo* file_to_ingest, int level) {
  if (level == 0) {
    // Files can always fit in L0
    return true;
  }

  auto* vstorage = cfd_->current()->storage_info();
  Slice file_smallest_user_key(file_to_ingest->smallest_user_key);
  Slice file_largest_user_key(file_to_ingest->largest_user_key);

  if (vstorage->OverlapInLevel(level, &file_smallest_user_key,
                               &file_largest_user_key)) {
    // File overlap with another files in this level, we cannot
    // add it to this level
    return false;
  }
  if (cfd_->RangeOverlapWithCompaction(file_smallest_user_key,
                                       file_largest_user_key, level)) {
    // File overlap with a running compaction output that will be stored
    // in this level, we cannot add this file to this level
    return false;
  }

  // File did not overlap with level files, our compaction output
  return true;
}

}  // namespace rocksdb
// -*- C++ -*- // Copyright (C) 2005, 2006, 2009, 2011 Free Software Foundation, Inc. // // This file is part of the GNU ISO C++ Library. This library is free // software; you can redistribute it and/or modify it under the terms // of the GNU General Public License as published by the Free Software // Foundation; either version 3, or (at your option) any later // version. // This library is distributed in the hope that it will be useful, but // WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // General Public License for more details. // Under Section 7 of GPL version 3, you are granted additional // permissions described in the GCC Runtime Library Exception, version // 3.1, as published by the Free Software Foundation. // You should have received a copy of the GNU General Public License and // a copy of the GCC Runtime Library Exception along with this program; // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see // <http://www.gnu.org/licenses/>. // Copyright (C) 2004 Ami Tavory and Vladimir Dreizin, IBM-HRL. // Permission to use, copy, modify, sell, and distribute this software // is hereby granted without fee, provided that the above copyright // notice appears in all copies, and that both that copyright notice // and this permission notice appear in supporting documentation. None // of the above authors, nor IBM Haifa Research Laboratories, make any // representation about the suitability of this software for any // purpose. It is provided "as is" without express or implied // warranty. /** * @file ranged_hash_fn.hpp * Contains a unified ranged hash functor, allowing the hash tables * to deal with a single class for ranged hashing. */ #ifndef PB_DS_RANGED_HASH_FN_HPP #define PB_DS_RANGED_HASH_FN_HPP #include <utility> #include <debug/debug.h> namespace __gnu_pbds { namespace detail { /// Primary template. 
  // Unified ranged-hash functor: combines an element hash function (Hash_Fn)
  // with a range-combining function (Comb_Hash_Fn) that maps a raw hash value
  // into a table position. Store_Hash selects whether the raw hash value is
  // also returned for storage alongside the entry.
  template<typename Key, typename Hash_Fn, typename _Alloc,
	   typename Comb_Hash_Fn, bool Store_Hash>
    class ranged_hash_fn;

#define PB_DS_CLASS_T_DEC \
  template<typename Key, typename Hash_Fn, typename _Alloc, \
	   typename Comb_Hash_Fn>

#define PB_DS_CLASS_C_DEC \
  ranged_hash_fn<Key,	Hash_Fn, _Alloc, Comb_Hash_Fn, false>

  /**
   * Specialization 1
   * The client supplies a hash function and a ranged hash function,
   * and requests that hash values not be stored.
   **/
  template<typename Key, typename Hash_Fn, typename _Alloc,
	   typename Comb_Hash_Fn>
    class ranged_hash_fn< Key, Hash_Fn, _Alloc, Comb_Hash_Fn, false>
    : public Hash_Fn, public Comb_Hash_Fn
    {
    protected:
      typedef typename _Alloc::size_type size_type;
      typedef Hash_Fn hash_fn_base;
      typedef Comb_Hash_Fn comb_hash_fn_base;
      typedef typename _Alloc::template rebind< Key>::other key_allocator;
      typedef typename key_allocator::const_reference key_const_reference;

      ranged_hash_fn(size_type);

      ranged_hash_fn(size_type, const Hash_Fn&);

      ranged_hash_fn(size_type, const Hash_Fn&, const Comb_Hash_Fn&);

      void
      swap(PB_DS_CLASS_C_DEC&);

      void
      notify_resized(size_type);

      // Maps a key to a table position: comb(hash(key)).
      inline size_type
      operator()(key_const_reference) const;
    };

  PB_DS_CLASS_T_DEC
  PB_DS_CLASS_C_DEC::
  ranged_hash_fn(size_type size)
  { Comb_Hash_Fn::notify_resized(size); }

  PB_DS_CLASS_T_DEC
  PB_DS_CLASS_C_DEC::
  ranged_hash_fn(size_type size, const Hash_Fn& r_hash_fn)
  : Hash_Fn(r_hash_fn)
  { Comb_Hash_Fn::notify_resized(size); }

  PB_DS_CLASS_T_DEC
  PB_DS_CLASS_C_DEC::
  ranged_hash_fn(size_type size, const Hash_Fn& r_hash_fn,
		 const Comb_Hash_Fn& r_comb_hash_fn)
  : Hash_Fn(r_hash_fn), Comb_Hash_Fn(r_comb_hash_fn)
  { comb_hash_fn_base::notify_resized(size); }

  PB_DS_CLASS_T_DEC
  void
  PB_DS_CLASS_C_DEC::
  swap(PB_DS_CLASS_C_DEC& other)
  {
    comb_hash_fn_base::swap(other);
    // Swap the Hash_Fn base subobjects via base-reference casts.
    std::swap((Hash_Fn& )(*this), (Hash_Fn& )other);
  }

  PB_DS_CLASS_T_DEC
  void
  PB_DS_CLASS_C_DEC::
  notify_resized(size_type size)
  { comb_hash_fn_base::notify_resized(size); }

  PB_DS_CLASS_T_DEC
  inline typename PB_DS_CLASS_C_DEC::size_type
  PB_DS_CLASS_C_DEC::
  operator()(key_const_reference r_key) const
  { return (comb_hash_fn_base::operator()(hash_fn_base::operator()(r_key)));}

#undef PB_DS_CLASS_T_DEC
#undef PB_DS_CLASS_C_DEC

#define PB_DS_CLASS_T_DEC \
  template<typename Key, typename Hash_Fn, typename _Alloc, \
	   typename Comb_Hash_Fn>

#define PB_DS_CLASS_C_DEC \
  ranged_hash_fn<Key,Hash_Fn, _Alloc, Comb_Hash_Fn, true>

  /**
   * Specialization 2
   * The client supplies a hash function and a ranged hash function,
   * and requests that hash values be stored.
   **/
  template<typename Key, typename Hash_Fn, typename _Alloc,
	   typename Comb_Hash_Fn>
    class ranged_hash_fn<Key, Hash_Fn, _Alloc, Comb_Hash_Fn, true>
    : public Hash_Fn, public Comb_Hash_Fn
    {
    protected:
      typedef typename _Alloc::size_type size_type;
      // (position, raw hash) pair returned to the caller for storage.
      typedef std::pair<size_type, size_type> comp_hash;
      typedef Hash_Fn hash_fn_base;
      typedef Comb_Hash_Fn comb_hash_fn_base;
      typedef typename _Alloc::template rebind<Key>::other key_allocator;
      typedef typename key_allocator::const_reference key_const_reference;

      ranged_hash_fn(size_type);

      ranged_hash_fn(size_type, const Hash_Fn&);

      ranged_hash_fn(size_type, const Hash_Fn&, const Comb_Hash_Fn&);

      void
      swap(PB_DS_CLASS_C_DEC&);

      void
      notify_resized(size_type);

      // Computes both the table position and the raw hash value.
      inline comp_hash
      operator()(key_const_reference) const;

      // Recombines an already-computed raw hash value with the key's position
      // (used after a resize, when the stored hash is still valid).
      inline comp_hash
      operator()(key_const_reference, size_type) const;
    };

  PB_DS_CLASS_T_DEC
  PB_DS_CLASS_C_DEC::
  ranged_hash_fn(size_type size)
  { Comb_Hash_Fn::notify_resized(size); }

  PB_DS_CLASS_T_DEC
  PB_DS_CLASS_C_DEC::
  ranged_hash_fn(size_type size, const Hash_Fn& r_hash_fn)
  : Hash_Fn(r_hash_fn)
  { Comb_Hash_Fn::notify_resized(size); }

  PB_DS_CLASS_T_DEC
  PB_DS_CLASS_C_DEC::
  ranged_hash_fn(size_type size, const Hash_Fn& r_hash_fn,
		 const Comb_Hash_Fn& r_comb_hash_fn)
  : Hash_Fn(r_hash_fn), Comb_Hash_Fn(r_comb_hash_fn)
  { comb_hash_fn_base::notify_resized(size); }

  PB_DS_CLASS_T_DEC
  void
  PB_DS_CLASS_C_DEC::
  swap(PB_DS_CLASS_C_DEC& other)
  {
    comb_hash_fn_base::swap(other);
    std::swap((Hash_Fn& )(*this), (Hash_Fn& )other);
  }

  PB_DS_CLASS_T_DEC
  void
  PB_DS_CLASS_C_DEC::
  notify_resized(size_type size)
  { comb_hash_fn_base::notify_resized(size); }

  PB_DS_CLASS_T_DEC
  inline typename PB_DS_CLASS_C_DEC::comp_hash
  PB_DS_CLASS_C_DEC::
  operator()(key_const_reference r_key) const
  {
    const size_type hash = hash_fn_base::operator()(r_key);
    return std::make_pair(comb_hash_fn_base::operator()(hash), hash);
  }

  PB_DS_CLASS_T_DEC
  inline typename PB_DS_CLASS_C_DEC::comp_hash
  PB_DS_CLASS_C_DEC::
  operator()
  // In debug builds the key is used to assert the stored hash is consistent;
  // in release builds the parameter name is suppressed to avoid a warning.
#ifdef _GLIBCXX_DEBUG
  (key_const_reference r_key, size_type hash) const
#else
  (key_const_reference /*r_key*/, size_type hash) const
#endif
  {
    _GLIBCXX_DEBUG_ASSERT(hash == hash_fn_base::operator()(r_key));
    return std::make_pair(comb_hash_fn_base::operator()(hash), hash);
  }

#undef PB_DS_CLASS_T_DEC
#undef PB_DS_CLASS_C_DEC

#define PB_DS_CLASS_T_DEC \
  template<typename Key, typename _Alloc, typename Comb_Hash_Fn>

#define PB_DS_CLASS_C_DEC \
  ranged_hash_fn<Key, null_type, _Alloc, Comb_Hash_Fn, false>

  /**
   * Specialization 3
   * The client does not supply a hash function (by specifying
   * null_type as the Hash_Fn parameter), and requests that hash
   * values not be stored.
   **/
  template<typename Key, typename _Alloc, typename Comb_Hash_Fn>
    class ranged_hash_fn<Key, null_type, _Alloc, Comb_Hash_Fn, false>
    : public Comb_Hash_Fn
    {
    protected:
      typedef typename _Alloc::size_type size_type;
      typedef Comb_Hash_Fn comb_hash_fn_base;

      ranged_hash_fn(size_type);

      ranged_hash_fn(size_type, const Comb_Hash_Fn&);

      // null_type overload keeps the three-argument constructor signature
      // uniform across all specializations.
      ranged_hash_fn(size_type, const null_type&, const Comb_Hash_Fn&);

      void
      swap(PB_DS_CLASS_C_DEC&);
    };

  PB_DS_CLASS_T_DEC
  PB_DS_CLASS_C_DEC::
  ranged_hash_fn(size_type size)
  { Comb_Hash_Fn::notify_resized(size); }

  PB_DS_CLASS_T_DEC
  PB_DS_CLASS_C_DEC::
  ranged_hash_fn(size_type size, const Comb_Hash_Fn& r_comb_hash_fn)
  : Comb_Hash_Fn(r_comb_hash_fn)
  { }

  PB_DS_CLASS_T_DEC
  PB_DS_CLASS_C_DEC::
  ranged_hash_fn(size_type size, const null_type& r_null_type,
		 const Comb_Hash_Fn& r_comb_hash_fn)
  : Comb_Hash_Fn(r_comb_hash_fn)
  { }

  PB_DS_CLASS_T_DEC
  void
  PB_DS_CLASS_C_DEC::
  swap(PB_DS_CLASS_C_DEC& other)
  { comb_hash_fn_base::swap(other); }

#undef PB_DS_CLASS_T_DEC
#undef PB_DS_CLASS_C_DEC

#define PB_DS_CLASS_T_DEC \
  template<typename Key, typename _Alloc, typename Comb_Hash_Fn>

#define PB_DS_CLASS_C_DEC \
  ranged_hash_fn<Key, null_type, _Alloc, Comb_Hash_Fn, true>

  /**
   * Specialization 4
   * The client does not supply a hash function (by specifying
   * null_type as the Hash_Fn parameter), and requests that hash
   * values be stored.
   **/
  template<typename Key, typename _Alloc, typename Comb_Hash_Fn>
    class ranged_hash_fn<Key, null_type, _Alloc, Comb_Hash_Fn, true>
    : public Comb_Hash_Fn
    {
    protected:
      typedef typename _Alloc::size_type size_type;
      typedef Comb_Hash_Fn comb_hash_fn_base;

      ranged_hash_fn(size_type);

      ranged_hash_fn(size_type, const Comb_Hash_Fn&);

      ranged_hash_fn(size_type, const null_type&, const Comb_Hash_Fn&);

      void
      swap(PB_DS_CLASS_C_DEC&);
    };

  PB_DS_CLASS_T_DEC
  PB_DS_CLASS_C_DEC::
  ranged_hash_fn(size_type size)
  { Comb_Hash_Fn::notify_resized(size); }

  PB_DS_CLASS_T_DEC
  PB_DS_CLASS_C_DEC::
  ranged_hash_fn(size_type size, const Comb_Hash_Fn& r_comb_hash_fn)
  : Comb_Hash_Fn(r_comb_hash_fn)
  { }

  PB_DS_CLASS_T_DEC
  PB_DS_CLASS_C_DEC::
  ranged_hash_fn(size_type size, const null_type& r_null_type,
		 const Comb_Hash_Fn& r_comb_hash_fn)
  : Comb_Hash_Fn(r_comb_hash_fn)
  { }

  PB_DS_CLASS_T_DEC
  void
  PB_DS_CLASS_C_DEC::
  swap(PB_DS_CLASS_C_DEC& other)
  { comb_hash_fn_base::swap(other); }

#undef PB_DS_CLASS_T_DEC
#undef PB_DS_CLASS_C_DEC

  } // namespace detail
} // namespace __gnu_pbds

#endif
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the Apache 2.0 License. #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN #include "consensus/aft/raft.h" #include "ds/logger.h" #include "logging_stub.h" #define DOCTEST_CONFIG_NO_SHORT_MACRO_NAMES #include <chrono> #include <doctest/doctest.h> #include <string> using namespace std; using ms = std::chrono::milliseconds; using TRaft = aft::Aft<aft::LedgerStubProxy, aft::ChannelStubProxy, aft::StubSnapshotter>; using Store = aft::LoggingStubStore; using StoreSig = aft::LoggingStubStoreSig; using Adaptor = aft::Adaptor<Store>; threading::ThreadMessaging threading::ThreadMessaging::thread_messaging; std::atomic<uint16_t> threading::ThreadMessaging::thread_count = 0; std::vector<uint8_t> cert; DOCTEST_TEST_CASE("Single node startup" * doctest::test_suite("single")) { auto kv_store = std::make_shared<Store>(0); aft::NodeId node_id(0); ms election_timeout(150); TRaft r0( ConsensusType::CFT, std::make_unique<Adaptor>(kv_store), std::make_unique<aft::LedgerStubProxy>(node_id), std::make_shared<aft::ChannelStubProxy>(), std::make_shared<aft::StubSnapshotter>(), nullptr, nullptr, cert, std::make_shared<aft::State>(node_id), nullptr, nullptr, nullptr, ms(10), election_timeout, ms(1000)); kv::Configuration::Nodes config; config.try_emplace(node_id); r0.add_configuration(0, config); DOCTEST_INFO("DOCTEST_REQUIRE Initial State"); DOCTEST_REQUIRE(!r0.is_primary()); DOCTEST_REQUIRE(r0.leader() == aft::NoNode); DOCTEST_REQUIRE(r0.get_term() == 0); DOCTEST_REQUIRE(r0.get_commit_idx() == 0); DOCTEST_INFO( "In the absence of other nodes, become leader after election timeout"); r0.periodic(ms(0)); DOCTEST_REQUIRE(!r0.is_primary()); r0.periodic(election_timeout * 2); DOCTEST_REQUIRE(r0.is_primary()); DOCTEST_REQUIRE(r0.leader() == node_id); } DOCTEST_TEST_CASE("Single node commit" * doctest::test_suite("single")) { auto kv_store = std::make_shared<Store>(0); aft::NodeId node_id(0); ms election_timeout(150); TRaft 
r0( ConsensusType::CFT, std::make_unique<Adaptor>(kv_store), std::make_unique<aft::LedgerStubProxy>(node_id), std::make_shared<aft::ChannelStubProxy>(), std::make_shared<aft::StubSnapshotter>(), nullptr, nullptr, cert, std::make_shared<aft::State>(node_id), nullptr, nullptr, nullptr, ms(10), election_timeout, ms(1000)); aft::Configuration::Nodes config; config[node_id] = {}; r0.add_configuration(0, config); DOCTEST_INFO("Become leader after election timeout"); r0.periodic(election_timeout * 2); DOCTEST_REQUIRE(r0.is_primary()); DOCTEST_INFO("Observe that data is committed on replicate immediately"); for (size_t i = 1; i <= 5; ++i) { auto entry = std::make_shared<std::vector<uint8_t>>(); entry->push_back(1); entry->push_back(2); entry->push_back(3); auto hooks = std::make_shared<kv::ConsensusHookPtrs>(); r0.replicate(kv::BatchVector{{i, entry, true, hooks}}, 1); DOCTEST_REQUIRE(r0.get_last_idx() == i); DOCTEST_REQUIRE(r0.get_commit_idx() == i); } } DOCTEST_TEST_CASE( "Multiple nodes startup and election" * doctest::test_suite("multiple")) { auto kv_store0 = std::make_shared<Store>(0); auto kv_store1 = std::make_shared<Store>(1); auto kv_store2 = std::make_shared<Store>(2); aft::NodeId node_id0(0); aft::NodeId node_id1(1); aft::NodeId node_id2(2); ms request_timeout(10); TRaft r0( ConsensusType::CFT, std::make_unique<Adaptor>(kv_store0), std::make_unique<aft::LedgerStubProxy>(node_id0), std::make_shared<aft::ChannelStubProxy>(), std::make_shared<aft::StubSnapshotter>(), nullptr, nullptr, cert, std::make_shared<aft::State>(node_id0), nullptr, nullptr, nullptr, request_timeout, ms(20), ms(1000)); TRaft r1( ConsensusType::CFT, std::make_unique<Adaptor>(kv_store1), std::make_unique<aft::LedgerStubProxy>(node_id1), std::make_shared<aft::ChannelStubProxy>(), std::make_shared<aft::StubSnapshotter>(), nullptr, nullptr, cert, std::make_shared<aft::State>(node_id1), nullptr, nullptr, nullptr, request_timeout, ms(100), ms(1000)); TRaft r2( ConsensusType::CFT, 
std::make_unique<Adaptor>(kv_store2), std::make_unique<aft::LedgerStubProxy>(node_id2), std::make_shared<aft::ChannelStubProxy>(), std::make_shared<aft::StubSnapshotter>(), nullptr, nullptr, cert, std::make_shared<aft::State>(node_id2), nullptr, nullptr, nullptr, request_timeout, ms(50), ms(1000)); aft::Configuration::Nodes config; config[node_id0] = {}; config[node_id1] = {}; config[node_id2] = {}; r0.add_configuration(0, config); r1.add_configuration(0, config); r2.add_configuration(0, config); auto by_0 = [](auto const& lhs, auto const& rhs) -> bool { return get<0>(lhs) < get<0>(rhs); }; DOCTEST_INFO("Node 0 exceeds its election timeout and starts an election"); r0.periodic(std::chrono::milliseconds(200)); DOCTEST_REQUIRE( ((aft::ChannelStubProxy*)r0.channels.get())->sent_request_vote.size() == 2); ((aft::ChannelStubProxy*)r0.channels.get())->sent_request_vote.sort(by_0); DOCTEST_INFO("Node 1 receives the request"); auto rv = ((aft::ChannelStubProxy*)r0.channels.get())->sent_request_vote.front(); ((aft::ChannelStubProxy*)r0.channels.get())->sent_request_vote.pop_front(); DOCTEST_REQUIRE(get<0>(rv) == node_id1); auto rvc = get<1>(rv); DOCTEST_REQUIRE(rvc.term == 1); DOCTEST_REQUIRE(rvc.last_committable_idx == 0); DOCTEST_REQUIRE( rvc.term_of_last_committable_idx == aft::ViewHistory::InvalidView); r1.recv_message(reinterpret_cast<uint8_t*>(&rvc), sizeof(rvc)); DOCTEST_INFO("Node 2 receives the request"); rv = ((aft::ChannelStubProxy*)r0.channels.get())->sent_request_vote.front(); ((aft::ChannelStubProxy*)r0.channels.get())->sent_request_vote.pop_front(); DOCTEST_REQUIRE(get<0>(rv) == node_id2); rvc = get<1>(rv); DOCTEST_REQUIRE(rvc.term == 1); DOCTEST_REQUIRE(rvc.last_committable_idx == 0); DOCTEST_REQUIRE( rvc.term_of_last_committable_idx == aft::ViewHistory::InvalidView); r2.recv_message(reinterpret_cast<uint8_t*>(&rvc), sizeof(rvc)); DOCTEST_INFO("Node 1 votes for Node 0"); DOCTEST_REQUIRE( ((aft::ChannelStubProxy*)r1.channels.get()) 
->sent_request_vote_response.size() == 1); auto rvr = ((aft::ChannelStubProxy*)r1.channels.get()) ->sent_request_vote_response.front(); ((aft::ChannelStubProxy*)r1.channels.get()) ->sent_request_vote_response.pop_front(); DOCTEST_REQUIRE(get<0>(rvr) == node_id0); auto rvrc = get<1>(rvr); DOCTEST_REQUIRE(rvrc.term == 1); DOCTEST_REQUIRE(rvrc.vote_granted); r0.recv_message(reinterpret_cast<uint8_t*>(&rvrc), sizeof(rvrc)); DOCTEST_INFO("Node 2 votes for Node 0"); DOCTEST_REQUIRE( ((aft::ChannelStubProxy*)r2.channels.get()) ->sent_request_vote_response.size() == 1); rvr = ((aft::ChannelStubProxy*)r2.channels.get()) ->sent_request_vote_response.front(); ((aft::ChannelStubProxy*)r2.channels.get()) ->sent_request_vote_response.pop_front(); DOCTEST_REQUIRE(get<0>(rvr) == node_id0); rvrc = get<1>(rvr); DOCTEST_REQUIRE(rvrc.term == 1); DOCTEST_REQUIRE(rvrc.vote_granted); r0.recv_message(reinterpret_cast<uint8_t*>(&rvrc), sizeof(rvrc)); DOCTEST_INFO( "Node 0 is now leader, and sends empty append entries to other nodes"); DOCTEST_REQUIRE(r0.is_primary()); DOCTEST_REQUIRE( ((aft::ChannelStubProxy*)r0.channels.get())->sent_append_entries.size() == 2); ((aft::ChannelStubProxy*)r0.channels.get())->sent_append_entries.sort(by_0); auto ae = ((aft::ChannelStubProxy*)r0.channels.get())->sent_append_entries.front(); ((aft::ChannelStubProxy*)r0.channels.get())->sent_append_entries.pop_front(); DOCTEST_REQUIRE(get<0>(ae) == node_id1); auto aec = get<1>(ae); DOCTEST_REQUIRE(aec.idx == 0); DOCTEST_REQUIRE(aec.term == 1); DOCTEST_REQUIRE(aec.prev_idx == 0); DOCTEST_REQUIRE(aec.prev_term == aft::ViewHistory::InvalidView); DOCTEST_REQUIRE(aec.leader_commit_idx == 0); ae = ((aft::ChannelStubProxy*)r0.channels.get())->sent_append_entries.front(); ((aft::ChannelStubProxy*)r0.channels.get())->sent_append_entries.pop_front(); DOCTEST_REQUIRE(get<0>(ae) == node_id2); aec = get<1>(ae); DOCTEST_REQUIRE(aec.idx == 0); DOCTEST_REQUIRE(aec.term == 1); DOCTEST_REQUIRE(aec.prev_idx == 0); 
DOCTEST_REQUIRE(aec.prev_term == aft::ViewHistory::InvalidView); DOCTEST_REQUIRE(aec.leader_commit_idx == 0); } template <class NodeMap, class Messages> static size_t dispatch_all(NodeMap& nodes, Messages& messages) { size_t count = 0; while (messages.size()) { auto message = messages.front(); messages.pop_front(); auto tgt_node_id = get<0>(message); auto contents = get<1>(message); nodes[tgt_node_id]->recv_message( reinterpret_cast<uint8_t*>(&contents), sizeof(contents)); count++; } return count; } template <class NodeMap, class Messages, class Assertion> static size_t dispatch_all_and_DOCTEST_CHECK( NodeMap& nodes, Messages& messages, const Assertion& assertion) { size_t count = 0; while (messages.size()) { auto message = messages.front(); messages.pop_front(); auto tgt_node_id = get<0>(message); auto contents = get<1>(message); assertion(contents); nodes[tgt_node_id]->recv_message( reinterpret_cast<uint8_t*>(&contents), sizeof(contents)); count++; } return count; } DOCTEST_TEST_CASE( "Multiple nodes append entries" * doctest::test_suite("multiple")) { auto kv_store0 = std::make_shared<Store>(0); auto kv_store1 = std::make_shared<Store>(1); auto kv_store2 = std::make_shared<Store>(2); aft::NodeId node_id0(0); aft::NodeId node_id1(1); aft::NodeId node_id2(2); ms request_timeout(10); TRaft r0( ConsensusType::CFT, std::make_unique<Adaptor>(kv_store0), std::make_unique<aft::LedgerStubProxy>(node_id0), std::make_shared<aft::ChannelStubProxy>(), std::make_shared<aft::StubSnapshotter>(), nullptr, nullptr, cert, std::make_shared<aft::State>(node_id0), nullptr, nullptr, nullptr, request_timeout, ms(20), ms(1000)); TRaft r1( ConsensusType::CFT, std::make_unique<Adaptor>(kv_store1), std::make_unique<aft::LedgerStubProxy>(node_id1), std::make_shared<aft::ChannelStubProxy>(), std::make_shared<aft::StubSnapshotter>(), nullptr, nullptr, cert, std::make_shared<aft::State>(node_id1), nullptr, nullptr, nullptr, request_timeout, ms(100), ms(1000)); TRaft r2( ConsensusType::CFT, 
std::make_unique<Adaptor>(kv_store2), std::make_unique<aft::LedgerStubProxy>(node_id2), std::make_shared<aft::ChannelStubProxy>(), std::make_shared<aft::StubSnapshotter>(), nullptr, nullptr, cert, std::make_shared<aft::State>(node_id2), nullptr, nullptr, nullptr, request_timeout, ms(50), ms(1000)); aft::Configuration::Nodes config; config[node_id0] = {}; config[node_id1] = {}; config[node_id2] = {}; r0.add_configuration(0, config); r1.add_configuration(0, config); r2.add_configuration(0, config); map<aft::NodeId, TRaft*> nodes; nodes[node_id0] = &r0; nodes[node_id1] = &r1; nodes[node_id2] = &r2; r0.periodic(std::chrono::milliseconds(200)); DOCTEST_INFO("Send request_votes to other nodes"); DOCTEST_REQUIRE( 2 == dispatch_all( nodes, ((aft::ChannelStubProxy*)r0.channels.get())->sent_request_vote)); DOCTEST_INFO("Send request_vote_reponses back"); DOCTEST_REQUIRE( 1 == dispatch_all( nodes, ((aft::ChannelStubProxy*)r1.channels.get())->sent_request_vote_response)); DOCTEST_REQUIRE( 1 == dispatch_all( nodes, ((aft::ChannelStubProxy*)r2.channels.get())->sent_request_vote_response)); DOCTEST_INFO("Send empty append_entries to other nodes"); DOCTEST_REQUIRE( 2 == dispatch_all( nodes, ((aft::ChannelStubProxy*)r0.channels.get())->sent_append_entries)); DOCTEST_INFO("Send append_entries_reponses back"); DOCTEST_REQUIRE( 1 == dispatch_all_and_DOCTEST_CHECK( nodes, ((aft::ChannelStubProxy*)r1.channels.get())->sent_append_entries_response, [](const auto& msg) { DOCTEST_REQUIRE(msg.last_log_idx == 0); DOCTEST_REQUIRE(msg.success == aft::AppendEntriesResponseType::OK); })); DOCTEST_REQUIRE( 1 == dispatch_all_and_DOCTEST_CHECK( nodes, ((aft::ChannelStubProxy*)r2.channels.get())->sent_append_entries_response, [](const auto& msg) { DOCTEST_REQUIRE(msg.last_log_idx == 0); DOCTEST_REQUIRE(msg.success == aft::AppendEntriesResponseType::OK); })); DOCTEST_INFO("There ought to be no messages pending anywhere now"); DOCTEST_REQUIRE( 
((aft::ChannelStubProxy*)r0.channels.get())->sent_msg_count() == 0); DOCTEST_REQUIRE( ((aft::ChannelStubProxy*)r1.channels.get())->sent_msg_count() == 0); DOCTEST_REQUIRE( ((aft::ChannelStubProxy*)r2.channels.get())->sent_msg_count() == 0); DOCTEST_INFO("Try to replicate on a follower, and fail"); std::vector<uint8_t> entry = {1, 2, 3}; auto data = std::make_shared<std::vector<uint8_t>>(entry); auto hooks = std::make_shared<kv::ConsensusHookPtrs>(); DOCTEST_REQUIRE_FALSE( r1.replicate(kv::BatchVector{{1, data, true, hooks}}, 1)); DOCTEST_INFO("Tell the leader to replicate a message"); DOCTEST_REQUIRE(r0.replicate(kv::BatchVector{{1, data, true, hooks}}, 1)); DOCTEST_REQUIRE(r0.ledger->ledger.size() == 1); DOCTEST_REQUIRE(*r0.ledger->ledger.front() == entry); DOCTEST_INFO("The other nodes are not told about this yet"); DOCTEST_REQUIRE( ((aft::ChannelStubProxy*)r0.channels.get())->sent_msg_count() == 0); r0.periodic(ms(10)); DOCTEST_INFO("Now the other nodes are sent append_entries"); DOCTEST_REQUIRE( 2 == dispatch_all_and_DOCTEST_CHECK( nodes, ((aft::ChannelStubProxy*)r0.channels.get())->sent_append_entries, [](const auto& msg) { DOCTEST_REQUIRE(msg.idx == 1); DOCTEST_REQUIRE(msg.term == 1); DOCTEST_REQUIRE(msg.prev_idx == 0); DOCTEST_REQUIRE(msg.prev_term == aft::ViewHistory::InvalidView); DOCTEST_REQUIRE(msg.leader_commit_idx == 0); })); DOCTEST_INFO("Which they acknowledge correctly"); DOCTEST_REQUIRE( 1 == dispatch_all_and_DOCTEST_CHECK( nodes, ((aft::ChannelStubProxy*)r1.channels.get())->sent_append_entries_response, [](const auto& msg) { DOCTEST_REQUIRE(msg.last_log_idx == 1); DOCTEST_REQUIRE(msg.success == aft::AppendEntriesResponseType::OK); })); DOCTEST_REQUIRE( 1 == dispatch_all_and_DOCTEST_CHECK( nodes, ((aft::ChannelStubProxy*)r2.channels.get())->sent_append_entries_response, [](const auto& msg) { DOCTEST_REQUIRE(msg.last_log_idx == 1); DOCTEST_REQUIRE(msg.success == aft::AppendEntriesResponseType::OK); })); } DOCTEST_TEST_CASE("Multiple nodes late join" 
* doctest::test_suite("multiple")) { auto kv_store0 = std::make_shared<Store>(0); auto kv_store1 = std::make_shared<Store>(1); auto kv_store2 = std::make_shared<Store>(2); aft::NodeId node_id0(0); aft::NodeId node_id1(1); aft::NodeId node_id2(2); ms request_timeout(10); TRaft r0( ConsensusType::CFT, std::make_unique<Adaptor>(kv_store0), std::make_unique<aft::LedgerStubProxy>(node_id0), std::make_shared<aft::ChannelStubProxy>(), std::make_shared<aft::StubSnapshotter>(), nullptr, nullptr, cert, std::make_shared<aft::State>(node_id0), nullptr, nullptr, nullptr, request_timeout, ms(20), ms(1000)); TRaft r1( ConsensusType::CFT, std::make_unique<Adaptor>(kv_store1), std::make_unique<aft::LedgerStubProxy>(node_id1), std::make_shared<aft::ChannelStubProxy>(), std::make_shared<aft::StubSnapshotter>(), nullptr, nullptr, cert, std::make_shared<aft::State>(node_id1), nullptr, nullptr, nullptr, request_timeout, ms(100), ms(1000)); TRaft r2( ConsensusType::CFT, std::make_unique<Adaptor>(kv_store2), std::make_unique<aft::LedgerStubProxy>(node_id2), std::make_shared<aft::ChannelStubProxy>(), std::make_shared<aft::StubSnapshotter>(), nullptr, nullptr, cert, std::make_shared<aft::State>(node_id2), nullptr, nullptr, nullptr, request_timeout, ms(50), ms(1000)); aft::Configuration::Nodes config; config[node_id0] = {}; config[node_id1] = {}; r0.add_configuration(0, config); r1.add_configuration(0, config); map<aft::NodeId, TRaft*> nodes; nodes[node_id0] = &r0; nodes[node_id1] = &r1; r0.periodic(std::chrono::milliseconds(200)); DOCTEST_REQUIRE( 1 == dispatch_all( nodes, ((aft::ChannelStubProxy*)r0.channels.get())->sent_request_vote)); DOCTEST_REQUIRE( 1 == dispatch_all( nodes, ((aft::ChannelStubProxy*)r1.channels.get())->sent_request_vote_response)); DOCTEST_REQUIRE( 1 == dispatch_all( nodes, ((aft::ChannelStubProxy*)r0.channels.get())->sent_append_entries)); DOCTEST_REQUIRE( 1 == dispatch_all_and_DOCTEST_CHECK( nodes, 
((aft::ChannelStubProxy*)r1.channels.get())->sent_append_entries_response, [](const auto& msg) { DOCTEST_REQUIRE(msg.last_log_idx == 0); DOCTEST_REQUIRE(msg.success == aft::AppendEntriesResponseType::OK); })); DOCTEST_REQUIRE( ((aft::ChannelStubProxy*)r0.channels.get())->sent_msg_count() == 0); DOCTEST_REQUIRE( ((aft::ChannelStubProxy*)r1.channels.get())->sent_msg_count() == 0); std::vector<uint8_t> first_entry = {1, 2, 3}; auto data = std::make_shared<std::vector<uint8_t>>(first_entry); auto hooks = std::make_shared<kv::ConsensusHookPtrs>(); DOCTEST_REQUIRE(r0.replicate(kv::BatchVector{{1, data, true, hooks}}, 1)); r0.periodic(ms(10)); DOCTEST_REQUIRE( 1 == dispatch_all_and_DOCTEST_CHECK( nodes, ((aft::ChannelStubProxy*)r0.channels.get())->sent_append_entries, [](const auto& msg) { DOCTEST_REQUIRE(msg.idx == 1); DOCTEST_REQUIRE(msg.term == 1); DOCTEST_REQUIRE(msg.prev_idx == 0); DOCTEST_REQUIRE(msg.prev_term == aft::ViewHistory::InvalidView); DOCTEST_REQUIRE(msg.leader_commit_idx == 0); })); DOCTEST_REQUIRE( 1 == dispatch_all_and_DOCTEST_CHECK( nodes, ((aft::ChannelStubProxy*)r1.channels.get())->sent_append_entries_response, [](const auto& msg) { DOCTEST_REQUIRE(msg.last_log_idx == 1); DOCTEST_REQUIRE(msg.success == aft::AppendEntriesResponseType::OK); })); DOCTEST_INFO("Node 2 joins the ensemble"); aft::Configuration::Nodes config1; config1[node_id0] = {}; config1[node_id1] = {}; config1[node_id2] = {}; r0.add_configuration(0, config1); r1.add_configuration(0, config1); r2.add_configuration(0, config1); nodes[node_id2] = &r2; DOCTEST_INFO("Node 0 sends Node 2 what it's missed by joining late"); DOCTEST_REQUIRE( ((aft::ChannelStubProxy*)r2.channels.get())->sent_msg_count() == 0); DOCTEST_REQUIRE( ((aft::ChannelStubProxy*)r1.channels.get())->sent_msg_count() == 0); DOCTEST_REQUIRE( 1 == dispatch_all_and_DOCTEST_CHECK( nodes, ((aft::ChannelStubProxy*)r0.channels.get())->sent_append_entries, [](const auto& msg) { DOCTEST_REQUIRE(msg.idx == 1); 
DOCTEST_REQUIRE(msg.term == 1); DOCTEST_REQUIRE(msg.prev_idx == 1); DOCTEST_REQUIRE(msg.prev_term == 1); DOCTEST_REQUIRE(msg.leader_commit_idx == 1); })); } DOCTEST_TEST_CASE("Recv append entries logic" * doctest::test_suite("multiple")) { auto kv_store0 = std::make_shared<Store>(0); auto kv_store1 = std::make_shared<Store>(1); aft::NodeId node_id0(0); aft::NodeId node_id1(1); ms request_timeout(10); TRaft r0( ConsensusType::CFT, std::make_unique<Adaptor>(kv_store0), std::make_unique<aft::LedgerStubProxy>(node_id0), std::make_shared<aft::ChannelStubProxy>(), std::make_shared<aft::StubSnapshotter>(), nullptr, nullptr, cert, std::make_shared<aft::State>(node_id0), nullptr, nullptr, nullptr, request_timeout, ms(20), ms(1000)); TRaft r1( ConsensusType::CFT, std::make_unique<Adaptor>(kv_store1), std::make_unique<aft::LedgerStubProxy>(node_id1), std::make_shared<aft::ChannelStubProxy>(), std::make_shared<aft::StubSnapshotter>(), nullptr, nullptr, cert, std::make_shared<aft::State>(node_id1), nullptr, nullptr, nullptr, request_timeout, ms(100), ms(1000)); aft::Configuration::Nodes config0; config0[node_id0] = {}; config0[node_id1] = {}; r0.add_configuration(0, config0); r1.add_configuration(0, config0); map<aft::NodeId, TRaft*> nodes; nodes[node_id0] = &r0; nodes[node_id1] = &r1; r0.periodic(std::chrono::milliseconds(200)); DOCTEST_INFO("Initial election"); { DOCTEST_REQUIRE( 1 == dispatch_all( nodes, ((aft::ChannelStubProxy*)r0.channels.get())->sent_request_vote)); DOCTEST_REQUIRE( 1 == dispatch_all( nodes, ((aft::ChannelStubProxy*)r1.channels.get()) ->sent_request_vote_response)); DOCTEST_REQUIRE(r0.is_primary()); DOCTEST_REQUIRE( ((aft::ChannelStubProxy*)r0.channels.get())->sent_append_entries.size() == 1); DOCTEST_REQUIRE( 1 == dispatch_all( nodes, ((aft::ChannelStubProxy*)r0.channels.get())->sent_append_entries)); DOCTEST_REQUIRE( ((aft::ChannelStubProxy*)r0.channels.get())->sent_append_entries.size() == 0); } aft::AppendEntries ae_idx_2; // To save for later use 
DOCTEST_INFO("Replicate two entries"); { std::vector<uint8_t> first_entry = {1, 1, 1}; auto data_1 = std::make_shared<std::vector<uint8_t>>(first_entry); std::vector<uint8_t> second_entry = {2, 2, 2}; auto data_2 = std::make_shared<std::vector<uint8_t>>(second_entry); auto hooks = std::make_shared<kv::ConsensusHookPtrs>(); DOCTEST_REQUIRE(r0.replicate(kv::BatchVector{{1, data_1, true, hooks}}, 1)); DOCTEST_REQUIRE(r0.replicate(kv::BatchVector{{2, data_2, true, hooks}}, 1)); DOCTEST_REQUIRE(r0.ledger->ledger.size() == 2); r0.periodic(ms(10)); DOCTEST_REQUIRE( ((aft::ChannelStubProxy*)r0.channels.get())->sent_append_entries.size() == 1); // Receive append entries (idx: 2, prev_idx: 0) ae_idx_2 = ((aft::ChannelStubProxy*)r0.channels.get()) ->sent_append_entries.front() .second; r1.recv_message(reinterpret_cast<uint8_t*>(&ae_idx_2), sizeof(ae_idx_2)); DOCTEST_REQUIRE(r1.ledger->ledger.size() == 2); } DOCTEST_INFO("Receiving same append entries has no effect"); { DOCTEST_REQUIRE( 1 == dispatch_all( nodes, ((aft::ChannelStubProxy*)r0.channels.get())->sent_append_entries)); DOCTEST_REQUIRE(r1.ledger->ledger.size() == 2); } DOCTEST_INFO("Replicate one more entry but send AE all entries"); { std::vector<uint8_t> third_entry = {3, 3, 3}; auto data = std::make_shared<std::vector<uint8_t>>(third_entry); auto hooks = std::make_shared<kv::ConsensusHookPtrs>(); DOCTEST_REQUIRE(r0.replicate(kv::BatchVector{{3, data, true, hooks}}, 1)); DOCTEST_REQUIRE(r0.ledger->ledger.size() == 3); // Simulate that the append entries was not deserialised successfully // This ensures that r0 re-sends an AE with prev_idx = 0 next time auto aer = ((aft::ChannelStubProxy*)r1.channels.get()) ->sent_append_entries_response.front() .second; ((aft::ChannelStubProxy*)r1.channels.get()) ->sent_append_entries_response.pop_front(); aer.success = aft::AppendEntriesResponseType::FAIL; r0.recv_message(reinterpret_cast<uint8_t*>(&aer), sizeof(aer)); DOCTEST_REQUIRE( 
((aft::ChannelStubProxy*)r0.channels.get())->sent_append_entries.size() == 1); // Only the third entry is deserialised r1.ledger->reset_skip_count(); DOCTEST_REQUIRE( 1 == dispatch_all( nodes, ((aft::ChannelStubProxy*)r0.channels.get())->sent_append_entries)); DOCTEST_REQUIRE(r0.ledger->ledger.size() == 3); DOCTEST_REQUIRE(r1.ledger->skip_count == 2); r1.ledger->reset_skip_count(); } DOCTEST_INFO("Receiving stale append entries has no effect"); { r1.recv_message(reinterpret_cast<uint8_t*>(&ae_idx_2), sizeof(ae_idx_2)); DOCTEST_REQUIRE(r1.ledger->ledger.size() == 3); } DOCTEST_INFO("Replicate one more entry (normal behaviour)"); { std::vector<uint8_t> fourth_entry = {4, 4, 4}; auto data = std::make_shared<std::vector<uint8_t>>(fourth_entry); auto hooks = std::make_shared<kv::ConsensusHookPtrs>(); DOCTEST_REQUIRE(r0.replicate(kv::BatchVector{{4, data, true, hooks}}, 1)); DOCTEST_REQUIRE(r0.ledger->ledger.size() == 4); r0.periodic(ms(10)); DOCTEST_REQUIRE( ((aft::ChannelStubProxy*)r0.channels.get())->sent_append_entries.size() == 1); DOCTEST_REQUIRE( 1 == dispatch_all( nodes, ((aft::ChannelStubProxy*)r0.channels.get())->sent_append_entries)); DOCTEST_REQUIRE(r1.ledger->ledger.size() == 4); } DOCTEST_INFO( "Replicate one more entry without AE response from previous entry"); { std::vector<uint8_t> fifth_entry = {5, 5, 5}; auto data = std::make_shared<std::vector<uint8_t>>(fifth_entry); auto hooks = std::make_shared<kv::ConsensusHookPtrs>(); DOCTEST_REQUIRE(r0.replicate(kv::BatchVector{{5, data, true, hooks}}, 1)); DOCTEST_REQUIRE(r0.ledger->ledger.size() == 5); r0.periodic(ms(10)); DOCTEST_REQUIRE( ((aft::ChannelStubProxy*)r0.channels.get())->sent_append_entries.size() == 1); ((aft::ChannelStubProxy*)r0.channels.get()) ->sent_append_entries.pop_front(); // Simulate that the append entries was not deserialised successfully // This ensures that r0 re-sends an AE with prev_idx = 3 next time auto aer = ((aft::ChannelStubProxy*)r1.channels.get()) 
->sent_append_entries_response.front() .second; ((aft::ChannelStubProxy*)r1.channels.get()) ->sent_append_entries_response.pop_front(); aer.success = aft::AppendEntriesResponseType::FAIL; r0.recv_message(reinterpret_cast<uint8_t*>(&aer), sizeof(aer)); DOCTEST_REQUIRE( ((aft::ChannelStubProxy*)r0.channels.get())->sent_append_entries.size() == 1); // Receive append entries (idx: 5, prev_idx: 3) r1.ledger->reset_skip_count(); DOCTEST_REQUIRE( 1 == dispatch_all( nodes, ((aft::ChannelStubProxy*)r0.channels.get())->sent_append_entries)); DOCTEST_REQUIRE(r1.ledger->ledger.size() == 5); DOCTEST_REQUIRE(r1.ledger->skip_count == 2); } } DOCTEST_TEST_CASE("Exceed append entries limit") { logger::config::level() = logger::INFO; auto kv_store0 = std::make_shared<Store>(0); auto kv_store1 = std::make_shared<Store>(1); auto kv_store2 = std::make_shared<Store>(2); aft::NodeId node_id0(0); aft::NodeId node_id1(1); aft::NodeId node_id2(2); ms request_timeout(10); TRaft r0( ConsensusType::CFT, std::make_unique<Adaptor>(kv_store0), std::make_unique<aft::LedgerStubProxy>(node_id0), std::make_shared<aft::ChannelStubProxy>(), std::make_shared<aft::StubSnapshotter>(), nullptr, nullptr, cert, std::make_shared<aft::State>(node_id0), nullptr, nullptr, nullptr, request_timeout, ms(20), ms(1000)); TRaft r1( ConsensusType::CFT, std::make_unique<Adaptor>(kv_store1), std::make_unique<aft::LedgerStubProxy>(node_id1), std::make_shared<aft::ChannelStubProxy>(), std::make_shared<aft::StubSnapshotter>(), nullptr, nullptr, cert, std::make_shared<aft::State>(node_id1), nullptr, nullptr, nullptr, request_timeout, ms(100), ms(1000)); TRaft r2( ConsensusType::CFT, std::make_unique<Adaptor>(kv_store2), std::make_unique<aft::LedgerStubProxy>(node_id2), std::make_shared<aft::ChannelStubProxy>(), std::make_shared<aft::StubSnapshotter>(), nullptr, nullptr, cert, std::make_shared<aft::State>(node_id2), nullptr, nullptr, nullptr, request_timeout, ms(50), ms(1000)); aft::Configuration::Nodes config0; 
config0[node_id0] = {}; config0[node_id1] = {}; r0.add_configuration(0, config0); r1.add_configuration(0, config0); map<aft::NodeId, TRaft*> nodes; nodes[node_id0] = &r0; nodes[node_id1] = &r1; r0.periodic(std::chrono::milliseconds(200)); DOCTEST_REQUIRE( 1 == dispatch_all( nodes, ((aft::ChannelStubProxy*)r0.channels.get())->sent_request_vote)); DOCTEST_REQUIRE( 1 == dispatch_all( nodes, ((aft::ChannelStubProxy*)r1.channels.get())->sent_request_vote_response)); DOCTEST_REQUIRE( 1 == dispatch_all( nodes, ((aft::ChannelStubProxy*)r0.channels.get())->sent_append_entries)); DOCTEST_REQUIRE( 1 == dispatch_all_and_DOCTEST_CHECK( nodes, ((aft::ChannelStubProxy*)r1.channels.get())->sent_append_entries_response, [](const auto& msg) { DOCTEST_REQUIRE(msg.last_log_idx == 0); DOCTEST_REQUIRE(msg.success == aft::AppendEntriesResponseType::OK); })); DOCTEST_REQUIRE( ((aft::ChannelStubProxy*)r0.channels.get())->sent_msg_count() == 0); DOCTEST_REQUIRE( ((aft::ChannelStubProxy*)r1.channels.get())->sent_msg_count() == 0); // large entries of size (append_entries_size_limit / 2), so 2nd and 4th entry // will exceed append entries limit size which means that 2nd and 4th entries // will trigger send_append_entries() auto data = std::make_shared<::vector<uint8_t>>((r0.append_entries_size_limit / 2), 1); // I want to get ~500 messages sent over 1mill entries auto individual_entries = 1000000; auto num_small_entries_sent = 500; auto num_big_entries = 4; // send_append_entries() triggered or not bool msg_response = false; for (size_t i = 1; i <= num_big_entries; ++i) { auto hooks = std::make_shared<kv::ConsensusHookPtrs>(); DOCTEST_REQUIRE(r0.replicate(kv::BatchVector{{i, data, true, hooks}}, 1)); DOCTEST_REQUIRE( msg_response == dispatch_all_and_DOCTEST_CHECK( nodes, ((aft::ChannelStubProxy*)r0.channels.get())->sent_append_entries, [&i](const auto& msg) { DOCTEST_REQUIRE(msg.idx == i); DOCTEST_REQUIRE(msg.term == 1); DOCTEST_REQUIRE(msg.prev_idx == ((i <= 2) ? 
0 : 2)); })); msg_response = !msg_response; } int data_size = (num_small_entries_sent * r0.append_entries_size_limit) / (individual_entries - num_big_entries); auto smaller_data = std::make_shared<std::vector<uint8_t>>(data_size, 1); for (size_t i = num_big_entries + 1; i <= individual_entries; ++i) { auto hooks = std::make_shared<kv::ConsensusHookPtrs>(); DOCTEST_REQUIRE( r0.replicate(kv::BatchVector{{i, smaller_data, true, hooks}}, 1)); dispatch_all( nodes, ((aft::ChannelStubProxy*)r0.channels.get())->sent_append_entries); } DOCTEST_INFO("Node 2 joins the ensemble"); aft::Configuration::Nodes config1; config1[node_id0] = {}; config1[node_id1] = {}; config1[node_id2] = {}; r0.add_configuration(0, config1); r1.add_configuration(0, config1); r2.add_configuration(0, config1); nodes[node_id2] = &r2; DOCTEST_INFO("Node 0 sends Node 2 what it's missed by joining late"); DOCTEST_REQUIRE( ((aft::ChannelStubProxy*)r2.channels.get())->sent_msg_count() == 0); DOCTEST_REQUIRE( 1 == dispatch_all_and_DOCTEST_CHECK( nodes, ((aft::ChannelStubProxy*)r0.channels.get())->sent_append_entries, [&individual_entries](const auto& msg) { DOCTEST_REQUIRE(msg.idx == individual_entries); DOCTEST_REQUIRE(msg.term == 1); DOCTEST_REQUIRE(msg.prev_idx == individual_entries); })); DOCTEST_REQUIRE(r2.ledger->ledger.size() == 0); DOCTEST_REQUIRE(r0.ledger->ledger.size() == individual_entries); DOCTEST_INFO("Node 2 asks for Node 0 to send all the data up to now"); DOCTEST_REQUIRE( ((aft::ChannelStubProxy*)r2.channels.get()) ->sent_append_entries_response.size() == 1); auto aer = ((aft::ChannelStubProxy*)r2.channels.get()) ->sent_append_entries_response.front() .second; ((aft::ChannelStubProxy*)r2.channels.get()) ->sent_append_entries_response.pop_front(); r0.recv_message(reinterpret_cast<uint8_t*>(&aer), sizeof(aer)); DOCTEST_REQUIRE( (((aft::ChannelStubProxy*)r0.channels.get())->sent_append_entries.size() > num_small_entries_sent && 
((aft::ChannelStubProxy*)r0.channels.get())->sent_append_entries.size() <= num_small_entries_sent + num_big_entries)); auto sent_entries = dispatch_all( nodes, ((aft::ChannelStubProxy*)r0.channels.get())->sent_append_entries); DOCTEST_REQUIRE( (sent_entries > num_small_entries_sent && sent_entries <= num_small_entries_sent + num_big_entries)); DOCTEST_REQUIRE(r2.ledger->ledger.size() == individual_entries); }
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * Copyright (c) 2018 by Contributors
 * \file type_relations.cc
 * \brief A set of utilities and common functionality
 * for type relations.
 */
#include <tvm/relay/expr.h>
#include <tvm/relay/op.h>
#include <tvm/ir_pass.h>
#include <numeric>
#include "./type_relations.h"

namespace tvm {
namespace relay {

// Down-cast a relay Type to a TensorType.
// Returns a TensorType wrapping a null node when `t` is not a
// TensorTypeNode, so callers can test the result in a boolean context.
TensorType ToTensorType(const Type& t) {
  if (const auto* tt_node = t.as<TensorTypeNode>()) {
    return GetRef<TensorType>(tt_node);
  } else {
    return TensorType(nullptr);
  }
}

// Type relation that unifies every entry after types[0] with types[0]
// (i.e. outputs have exactly the type of the first input).
// `num_inputs` and `attrs` are unused here.
bool IdentityRel(const Array<Type>& types,
                 int num_inputs,
                 const Attrs& attrs,
                 const TypeReporter& reporter) {
  for (size_t i = 1; i < types.size(); ++i) {
    reporter->Assign(types[i], types[0]);
  }
  return true;
}

// Prove that two shape expressions are equal.
// First constant-folds (lhs - rhs); if that is not a constant, canonically
// simplifies the difference and re-checks. Returns false when equality
// cannot be proven (which is conservative for symbolic shapes).
bool EqualCheck(const IndexExpr& lhs, const IndexExpr& rhs) {
  IndexExpr diff = lhs - rhs;
  if (const int64_t* pdiff = as_const_int(diff)) {
    return pdiff[0] == 0;
  }
  // symbolic
  diff = tvm::ir::CanonicalSimplify(diff);
  if (const int64_t* pdiff = as_const_int(diff)) {
    return pdiff[0] == 0;
  }
  return false;
}

// True iff `lhs` is a compile-time constant equal to `value`.
bool EqualConstInt(const IndexExpr& lhs, int64_t value) {
  if (const int64_t* pvalue = as_const_int(lhs)) {
    return pvalue[0] == value;
  }
  return false;
}

// Compute the numpy-style broadcast of two tensor types.
// Shapes are compared from the trailing dimension backwards; a dimension of
// 1 broadcasts against any size, Any matches optimistically, and remaining
// leading dimensions are taken from the longer shape. Raises a RELAY_ERROR
// when a dimension pair is provably incompatible.
Type ConcreteBroadcast(const TensorType& t1,
                       const TensorType& t2,
                       DataType output_dtype) {
  std::vector<IndexExpr> oshape;
  size_t ndim1 = t1->shape.size();
  size_t ndim2 = t2->shape.size();
  size_t i = 1;
  // Walk the common suffix of both shapes (oshape is built in reverse).
  for (; i <= std::min(ndim1, ndim2); ++i) {
    IndexExpr s1 = t1->shape[ndim1 - i];
    IndexExpr s2 = t2->shape[ndim2 - i];
    if (EqualConstInt(s1, 1)) {
      oshape.push_back(s2);
    } else if (EqualConstInt(s2, 1)) {
      oshape.push_back(s1);
    } else if (s1.as<Any>()) {
      // s1 == 1 || s1 == s2
      oshape.push_back(s2);
    } else if (s2.as<Any>()) {
      // s2 == 1 || s2 == s1
      oshape.push_back(s1);
    } else if (EqualCheck(s1, s2)) {
      oshape.push_back(s1);
    } else {
      RELAY_ERROR(
        "Incompatible broadcast type " << t1 << " and " << t2).Raise();
    }
  }
  // Copy the leading dimensions of the longer shape unchanged.
  size_t max_ndim = std::max(ndim1, ndim2);
  auto& rshape = (ndim1 > ndim2) ? t1->shape : t2->shape;
  for (; i <= max_ndim; ++i) {
    oshape.push_back(rshape[max_ndim - i]);
  }
  // oshape was accumulated trailing-first, so reverse it on construction.
  return TensorTypeNode::make(
    Array<IndexExpr>(oshape.rbegin(), oshape.rend()), output_dtype);
}

// Type relation for broadcasting binary ops: types = {lhs, rhs, out}.
// The output carries the (asserted-equal) input dtype.
bool BroadcastRel(const Array<Type>& types,
                  int num_inputs,
                  const Attrs& attrs,
                  const TypeReporter& reporter) {
  CHECK_EQ(types.size(), 3);
  // DLOG(INFO) << "In1:" << types[0] << ",In2:" << types[1]
  //            << ",Out:" << types[2] << std::endl;
  if (auto t0 = ToTensorType(types[0])) {
    if (auto t1 = ToTensorType(types[1])) {
      CHECK_EQ(t0->dtype, t1->dtype);
      reporter->Assign(types[2], ConcreteBroadcast(t0, t1, t0->dtype));
      return true;
    }
  }
  // Inputs not yet resolved to tensor types; relation cannot fire yet.
  return false;
}

// Same as BroadcastRel, but the output dtype is boolean — used for
// broadcasting comparison ops.
bool BroadcastCompRel(const Array<Type>& types,
                      int num_inputs,
                      const Attrs& attrs,
                      const TypeReporter& reporter) {
  CHECK_EQ(types.size(), 3);
  // DLOG(INFO) << "In1:" << types[0] << ",In2:" << types[1]
  //            << ",Out:" << types[2] << std::endl;
  if (auto t0 = ToTensorType(types[0])) {
    if (auto t1 = ToTensorType(types[1])) {
      CHECK_EQ(t0->dtype, t1->dtype);
      reporter->Assign(types[2], ConcreteBroadcast(t0, t1, ::tvm::Bool()));
      return true;
    }
  }
  return false;
}

}  // namespace relay
}  // namespace tvm
#include <vector>
#include <iostream>

using std::vector;
using std::cout;
using std::endl;

// Walks a vector of 0..9 and duplicates every odd value in place, then
// prints the result. Demonstrates correct use of the iterator returned by
// vector::insert (the old iterator is invalidated by the insertion).
int main() {
    vector<int> values = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};

    for (auto pos = values.begin(); pos != values.end(); ++pos) {
        if (*pos % 2) {
            // insert() returns an iterator to the newly inserted copy;
            // step over it so the loop's ++pos advances past the original.
            pos = values.insert(pos, *pos);
            ++pos;
        }
    }

    for (int v : values)
        cout << v << " ";
    cout << endl;
    return 0;
}  // main
/*********************************************************************************************************************** * OpenStudio(R), Copyright (c) 2008-2020, Alliance for Sustainable Energy, LLC, and other contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the * following conditions are met: * * (1) Redistributions of source code must retain the above copyright notice, this list of conditions and the following * disclaimer. * * (2) Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided with the distribution. * * (3) Neither the name of the copyright holder nor the names of any contributors may be used to endorse or promote products * derived from this software without specific prior written permission from the respective party. * * (4) Other than as required in clauses (1) and (2), distributions in any form of modifications or other derivative works * may not use the "OpenStudio" trademark, "OS", "os", or any other confusingly similar designation without specific prior * written permission from Alliance for Sustainable Energy, LLC. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND ANY CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S), ANY CONTRIBUTORS, THE UNITED STATES GOVERNMENT, OR THE UNITED
*  STATES DEPARTMENT OF ENERGY, NOR ANY OF THEIR EMPLOYEES, BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
*  EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
*  USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
*  STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
*  ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
***********************************************************************************************************************/

#include "../ReverseTranslator.hpp"

#include "../../model/HeatBalanceAlgorithm.hpp"
#include "../../model/HeatBalanceAlgorithm_Impl.hpp"

#include <utilities/idd/HeatBalanceAlgorithm_FieldEnums.hxx>
#include "../../utilities/idd/IddEnums.hpp"
#include <utilities/idd/IddEnums.hxx>

#include "../../utilities/core/Assert.hpp"
#include "../../utilities/core/Optional.hpp"

using namespace openstudio::model;

namespace openstudio {

namespace energyplus {

// Reverse-translates an EnergyPlus HeatBalanceAlgorithm workspace object
// into the model's unique HeatBalanceAlgorithm object. Each field that is
// present in the workspace object overwrites the corresponding model
// setting; absent fields leave the model defaults untouched.
boost::optional<model::ModelObject> ReverseTranslator::translateHeatBalanceAlgorithm(
    const WorkspaceObject& workspaceObject)
{
  // Callers must only dispatch HeatBalanceAlgorithm objects here.
  OS_ASSERT(workspaceObject.iddObject().type() == IddObjectType::HeatBalanceAlgorithm);

  // HeatBalanceAlgorithm is a unique model object: this fetches the existing
  // one or creates it on first use.
  HeatBalanceAlgorithm heatBalanceAlgorithm = m_model.getUniqueModelObject<HeatBalanceAlgorithm>();

  // NOTE(review): the two bools presumably select (returnDefault,
  // returnUninitializedEmpty) — confirm against WorkspaceObject::getString.
  OptionalString s = workspaceObject.getString(HeatBalanceAlgorithmFields::Algorithm,false,true);
  if (s) {
    heatBalanceAlgorithm.setAlgorithm(*s);
  }

  OptionalDouble d = workspaceObject.getDouble(HeatBalanceAlgorithmFields::SurfaceTemperatureUpperLimit);
  if (d) {
    heatBalanceAlgorithm.setSurfaceTemperatureUpperLimit(*d);
  }

  d = workspaceObject.getDouble(HeatBalanceAlgorithmFields::MinimumSurfaceConvectionHeatTransferCoefficientValue);
  if (d) {
    heatBalanceAlgorithm.setMinimumSurfaceConvectionHeatTransferCoefficientValue(*d);
  }

  d = workspaceObject.getDouble(HeatBalanceAlgorithmFields::MaximumSurfaceConvectionHeatTransferCoefficientValue);
  if (d) {
    heatBalanceAlgorithm.setMaximumSurfaceConvectionHeatTransferCoefficientValue(*d);
  }

  return heatBalanceAlgorithm.cast<ModelObject>();
}

} // energyplus

} // openstudio
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "test/unittests/test-utils.h"
#include "testing/gmock/include/gmock/gmock.h"

#include "src/wasm/function-compiler.h"
#include "src/wasm/jump-table-assembler.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-engine.h"

namespace v8 {
namespace internal {
namespace wasm {
namespace wasm_heap_unittest {

// Unit tests for DisjointAllocationPool: an ordered set of disjoint address
// regions supporting Allocate (carve a sub-region out) and Merge (re-insert
// a region, coalescing with adjacent ones).
class DisjointAllocationPoolTest : public ::testing::Test {
 public:
  // Asserts that the pool holds exactly expected_regions, in order.
  void CheckPool(const DisjointAllocationPool& mem,
                 std::initializer_list<base::AddressRegion> expected_regions);
  // Asserts that two regions are equal.
  void CheckRange(base::AddressRegion region1, base::AddressRegion region2);
  // Builds a pool by merging the given regions one at a time.
  DisjointAllocationPool Make(
      std::initializer_list<base::AddressRegion> regions);
};

void DisjointAllocationPoolTest::CheckPool(
    const DisjointAllocationPool& mem,
    std::initializer_list<base::AddressRegion> expected_regions) {
  const auto& regions = mem.regions();
  EXPECT_EQ(regions.size(), expected_regions.size());
  // Compare element-wise in iteration order.
  auto iter = expected_regions.begin();
  for (auto it = regions.begin(), e = regions.end(); it != e; ++it, ++iter) {
    EXPECT_EQ(*it, *iter);
  }
}

void DisjointAllocationPoolTest::CheckRange(base::AddressRegion region1,
                                            base::AddressRegion region2) {
  EXPECT_EQ(region1, region2);
}

DisjointAllocationPool DisjointAllocationPoolTest::Make(
    std::initializer_list<base::AddressRegion> regions) {
  DisjointAllocationPool ret;
  for (auto& region : regions) {
    ret.Merge(region);
  }
  return ret;
}

// A default-constructed pool is empty; merging a region populates it.
TEST_F(DisjointAllocationPoolTest, ConstructEmpty) {
  DisjointAllocationPool a;
  EXPECT_TRUE(a.IsEmpty());
  CheckPool(a, {});
  a.Merge({1, 4});
  CheckPool(a, {{1, 4}});
}

TEST_F(DisjointAllocationPoolTest, ConstructWithRange) {
  DisjointAllocationPool a({1, 4});
  EXPECT_FALSE(a.IsEmpty());
  CheckPool(a, {{1, 4}});
}

// Allocation carves from the front of the first fitting region; merging the
// allocated piece back restores the original single region.
TEST_F(DisjointAllocationPoolTest, SimpleExtract) {
  DisjointAllocationPool a = Make({{1, 4}});
  base::AddressRegion b = a.Allocate(2);
  CheckPool(a, {{3, 2}});
  CheckRange(b, {1, 2});
  a.Merge(b);
  CheckPool(a, {{1, 4}});
  EXPECT_EQ(a.regions().size(), uint32_t{1});
  EXPECT_EQ(a.regions().begin()->begin(), uint32_t{1});
  EXPECT_EQ(a.regions().begin()->end(), uint32_t{5});
}

// Allocating an entire region empties the pool.
TEST_F(DisjointAllocationPoolTest, ExtractAll) {
  DisjointAllocationPool a({1, 4});
  base::AddressRegion b = a.Allocate(4);
  CheckRange(b, {1, 4});
  EXPECT_TRUE(a.IsEmpty());
  a.Merge(b);
  CheckPool(a, {{1, 4}});
}

// An oversized allocation fails, returns an empty region, and leaves the
// pool untouched.
TEST_F(DisjointAllocationPoolTest, FailToExtract) {
  DisjointAllocationPool a = Make({{1, 4}});
  base::AddressRegion b = a.Allocate(5);
  CheckPool(a, {{1, 4}});
  EXPECT_TRUE(b.is_empty());
}

TEST_F(DisjointAllocationPoolTest, FailToExtractExact) {
  DisjointAllocationPool a = Make({{1, 4}, {10, 4}});
  base::AddressRegion b = a.Allocate(5);
  CheckPool(a, {{1, 4}, {10, 4}});
  EXPECT_TRUE(b.is_empty());
}

// A request exactly matching a later region consumes that region whole.
TEST_F(DisjointAllocationPoolTest, ExtractExact) {
  DisjointAllocationPool a = Make({{1, 4}, {10, 5}});
  base::AddressRegion b = a.Allocate(5);
  CheckPool(a, {{1, 4}});
  CheckRange(b, {10, 5});
}

// Merging the gap between two regions coalesces all three into one.
TEST_F(DisjointAllocationPoolTest, Merging) {
  DisjointAllocationPool a = Make({{10, 5}, {20, 5}});
  a.Merge({15, 5});
  CheckPool(a, {{10, 15}});
}

// Merging a region adjacent to the first region extends it downwards.
TEST_F(DisjointAllocationPoolTest, MergingFirst) {
  DisjointAllocationPool a = Make({{10, 5}, {20, 5}});
  a.Merge({5, 5});
  CheckPool(a, {{5, 10}, {20, 5}});
}

// Merging just below a later region extends that region downwards.
TEST_F(DisjointAllocationPoolTest, MergingAbove) {
  DisjointAllocationPool a = Make({{10, 5}, {25, 5}});
  a.Merge({20, 5});
  CheckPool(a, {{10, 5}, {20, 10}});
}

// Two gap merges collapse three regions into one.
TEST_F(DisjointAllocationPoolTest, MergingMore) {
  DisjointAllocationPool a = Make({{10, 5}, {20, 5}, {30, 5}});
  a.Merge({15, 5});
  a.Merge({25, 5});
  CheckPool(a, {{10, 25}});
}

// Filling only the second gap coalesces only the upper two regions.
TEST_F(DisjointAllocationPoolTest, MergingSkip) {
  DisjointAllocationPool a = Make({{10, 5}, {20, 5}, {30, 5}});
  a.Merge({25, 5});
  CheckPool(a, {{10, 5}, {20, 15}});
}

// An additional merge adjacent to the coalesced block keeps growing it.
TEST_F(DisjointAllocationPoolTest, MergingSkipLargerSrc) {
  DisjointAllocationPool a = Make({{10, 5}, {20, 5}, {30, 5}});
  a.Merge({25, 5});
  a.Merge({35, 5});
  CheckPool(a, {{10, 5}, {20, 20}});
}

// A merge separated by a one-byte gap stays a separate region.
TEST_F(DisjointAllocationPoolTest, MergingSkipLargerSrcWithGap) {
  DisjointAllocationPool a = Make({{10, 5}, {20, 5}, {30, 5}});
  a.Merge({25, 5});
  a.Merge({36, 4});
  CheckPool(a, {{10, 5}, {20, 15}, {36, 4}});
}

}  // namespace wasm_heap_unittest
}  // namespace wasm
}  // namespace internal
}  // namespace v8
// Autogenerated from CppHeaderCreator // Created by Sc2ad // ========================================================================= #pragma once #pragma pack(push, 8) // Begin includes #include "extern/beatsaber-hook/shared/utils/typedefs.h" // Completed includes // Type namespace: System namespace System { // Autogenerated type: System.CLRConfig class CLRConfig : public ::Il2CppObject { public: // static System.Boolean CheckThrowUnobservedTaskExceptions() // Offset: 0x13363BC static bool CheckThrowUnobservedTaskExceptions(); }; // System.CLRConfig } #include "extern/beatsaber-hook/shared/utils/il2cpp-type-check.hpp" DEFINE_IL2CPP_ARG_TYPE(System::CLRConfig*, "System", "CLRConfig"); #pragma pack(pop)
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author raver119@gmail.com, created on 07.10.2017. // @author Yurii Shyrma (iuriish@yahoo.com) // #include <pointercast.h> #include <helpers/shape.h> #include <helpers/TAD.h> #include <specials.h> #include <dll.h> #include <NDArray.h> #include <ops/declarable/CustomOperations.h> #include <types/types.h> namespace nd4j { /** * Concatneate multi array of the same shape together * along a particular dimension */ template <typename T> void SpecialMethods<T>::concatCpuGeneric(int dimension, int numArrays, Nd4jPointer *data, Nd4jPointer *inputShapeInfo, void *vresult, Nd4jLong *resultShapeInfo) { auto result = reinterpret_cast<T *>(vresult); std::vector<Nd4jLong> iArgs = {dimension}; std::vector<double> tArgs; std::vector<bool> bArgsEmpty; std::vector<NDArray*> inputs(numArrays); std::vector<NDArray*> outputs(1); outputs[0] = new NDArray(static_cast<void*>(result), static_cast<Nd4jLong*>(resultShapeInfo)); for(int i = 0; i < numArrays; ++i) inputs[i] = new NDArray(static_cast<void *>(data[i]), static_cast<Nd4jLong*>(inputShapeInfo[i])); nd4j::ops::concat op; auto status = op.execute(inputs, outputs, tArgs, iArgs, bArgsEmpty); if(status != Status::OK()) throw std::runtime_error("concatCpuGeneric fails to be executed !"); delete 
outputs[0]; for(int i = 0; i < numArrays; ++i) delete inputs[i]; } /** * This kernel accumulates X arrays, and stores result into Z * * @tparam T * @param x * @param z * @param n * @param length */ template<typename T> void SpecialMethods<T>::accumulateGeneric(void **vx, void *vz, Nd4jLong *zShapeInfo, int n, const Nd4jLong length) { auto z = reinterpret_cast<T *>(vz); auto x = reinterpret_cast<T **>(vx); // aggregation step #ifdef _OPENMP int _threads = omp_get_max_threads(); #else // we can use whatever we want here, this value won't be used if there's no omp int _threads = 4; #endif PRAGMA_OMP_PARALLEL_FOR_SIMD for (Nd4jLong i = 0; i < length; i++) { for (Nd4jLong ar = 0; ar < n; ar++) { z[i] += x[ar][i]; } } } /** * This kernel averages X input arrays, and stores result to Z * * @tparam T * @param x * @param z * @param n * @param length * @param propagate */ template<typename T> void SpecialMethods<T>::averageGeneric(void **vx, void *vz, Nd4jLong *zShapeInfo, int n, const Nd4jLong length, bool propagate) { auto z = reinterpret_cast<T *>(vz); auto x = reinterpret_cast<T **>(vx); if (z == nullptr) { //code branch for absent Z z = x[0]; PRAGMA_OMP_SIMD for (Nd4jLong i = 0; i < length; i++) { z[i] /= n; } #ifdef _OPENNMP int _threads = omp_get_max_threads(); //nd4j::math::nd4j_min<int>(omp_get_max_threads() / 2, 4); #else // we can use whatever we want here, this value won't be used if there's no omp int _threads = 4; #endif PRAGMA_OMP_PARALLEL_FOR_SIMD for (Nd4jLong i = 0; i < length; i++) { for (Nd4jLong ar = 1; ar < n; ar++) { z[i] += x[ar][i] / n; } } // instead of doing element-wise propagation, we just issue memcpy to propagate data for (Nd4jLong ar = 1; ar < n; ar++) { memcpy(x[ar], z, length * sizeof(T)); } } else { // code branch for existing Z // memset before propagation memset(z, 0, length * sizeof(T)); // aggregation step #ifdef _OPENNMP int _threads = omp_get_max_threads(); //nd4j::math::nd4j_min<int>(omp_get_max_threads() / 2, 4); #else // we can use 
whatever we want here, this value won't be used if there's no omp int _threads = 4; #endif PRAGMA_OMP_PARALLEL_FOR_SIMD for (Nd4jLong i = 0; i < length; i++) { for (Nd4jLong ar = 0; ar < n; ar++) { z[i] += x[ar][i] / n; } } // instead of doing element-wise propagation, we just issue memcpy to propagate data for (Nd4jLong ar = 0; ar < n; ar++) { memcpy(x[ar], z, length * sizeof(T)); } } } template <typename T> Nd4jLong SpecialMethods<T>::getPosition(Nd4jLong *xShapeInfo, Nd4jLong index) { auto xEWS = shape::elementWiseStride(xShapeInfo); if (xEWS == 1) return index; else if (xEWS > 1) return index * xEWS; else return shape::getIndexOffset(index, xShapeInfo, shape::length(xShapeInfo)); } template<typename T> void SpecialMethods<T>::quickSort_parallel_internal(T* array, Nd4jLong *xShapeInfo, int left, int right, int cutoff, bool descending) { int i = left, j = right; T tmp; T pivot = array[getPosition(xShapeInfo, (left + right) / 2)]; { /* PARTITION PART */ while (i <= j) { if (descending) { while (array[getPosition(xShapeInfo, i)] > pivot) i++; while (array[getPosition(xShapeInfo, j)] < pivot) j--; if (i <= j) { tmp = array[getPosition(xShapeInfo, i)]; array[getPosition(xShapeInfo, i)] = array[getPosition(xShapeInfo, j)]; array[getPosition(xShapeInfo, j)] = tmp; i++; j--; } } else { while (array[getPosition(xShapeInfo, i)] < pivot) i++; while (array[getPosition(xShapeInfo, j)] > pivot) j--; if (i <= j) { tmp = array[getPosition(xShapeInfo, i)]; array[getPosition(xShapeInfo, i)] = array[getPosition(xShapeInfo, j)]; array[getPosition(xShapeInfo, j)] = tmp; i++; j--; } } } } // if ( ((right-left)<cutoff) ){ if (left < j){ quickSort_parallel_internal(array, xShapeInfo, left, j, cutoff, descending); } if (i < right){ quickSort_parallel_internal(array, xShapeInfo, i, right, cutoff, descending); } }else{ #pragma omp task { quickSort_parallel_internal(array, xShapeInfo, left, j, cutoff, descending); } #pragma omp task { quickSort_parallel_internal(array, xShapeInfo, i, 
right, cutoff, descending); } } } template<typename T> void SpecialMethods<T>::quickSort_parallel(void *varray, Nd4jLong *xShapeInfo, Nd4jLong lenArray, int numThreads, bool descending){ auto array = reinterpret_cast<T *>(varray); int cutoff = 1000; PRAGMA_OMP_PARALLEL_THREADS(numThreads) { #pragma omp single nowait { quickSort_parallel_internal(array, xShapeInfo, 0, lenArray-1, cutoff, descending); } } } template <typename T> int SpecialMethods<T>::nextPowerOf2(int number) { int pos = 0; while (number > 0) { pos++; number = number >> 1; } return (int) pow(2, pos); } template <typename T> int SpecialMethods<T>::lastPowerOf2(int number) { int p = 1; while (p <= number) p <<= 1; p >>= 1; return p; } template<typename T> void SpecialMethods<T>::sortGeneric(void *vx, Nd4jLong *xShapeInfo, bool descending) { auto x = reinterpret_cast<T *>(vx); quickSort_parallel(x, xShapeInfo, shape::length(xShapeInfo), omp_get_max_threads(), descending); } template<typename T> void SpecialMethods<T>::sortTadGeneric(void *vx, Nd4jLong *xShapeInfo, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, bool descending) { auto x = reinterpret_cast<T *>(vx); //quickSort_parallel(x, xShapeInfo, shape::length(xShapeInfo), omp_get_max_threads(), descending); Nd4jLong xLength = shape::length(xShapeInfo); Nd4jLong xTadLength = shape::tadLength(xShapeInfo, dimension, dimensionLength); int numTads = xLength / xTadLength; PRAGMA_OMP_PARALLEL_FOR for (int r = 0; r < numTads; r++) { T *dx = x + tadOffsets[r]; quickSort_parallel(dx, tadShapeInfo, xTadLength, 1, descending); } } template<typename T> void SpecialMethods<T>::decodeBitmapGeneric(void *dx, Nd4jLong N, void *vz, Nd4jLong *zShapeInfo) { auto dz = reinterpret_cast<T *>(vz); auto x = reinterpret_cast<int *>(dx); Nd4jLong lim = N / 16 + 5; FloatBits2 fb; fb.i_ = x[2]; float threshold = fb.f_; PRAGMA_OMP_PARALLEL_FOR for (Nd4jLong e = 4; e < lim; e++) { for (int bitId = 0; bitId < 16; bitId++) { bool hasBit = (x[e] & 
1 << (bitId) ) != 0; bool hasSign = (x[e] & 1 << (bitId + 16) ) != 0; if (hasBit) { if (hasSign) dz[(e - 4) * 16 + bitId] -= threshold; else dz[(e - 4) * 16 + bitId] += threshold; } else if (hasSign) { dz[(e - 4) * 16 + bitId] -= threshold / 2; } } } } template<typename S, typename T> void SpecialTypeConverter::convertGeneric(Nd4jPointer * extras, void *dx, Nd4jLong N, void *dz) { auto x = reinterpret_cast<S *>(dx); auto z = reinterpret_cast<T *>(dz); if (N < nd4j::Environment::getInstance()->elementwiseThreshold()) { for (int i = 0; i < N; i++) { z[i] = static_cast<T>(x[i]); } } else { PRAGMA_OMP_PARALLEL_FOR for (int i = 0; i < N; i++) { z[i] = static_cast<T>(x[i]); } } }; BUILD_DOUBLE_TEMPLATE(template void SpecialTypeConverter::convertGeneric, (Nd4jPointer * extras, void *dx, Nd4jLong N, void *dz), LIBND4J_TYPES, LIBND4J_TYPES); template<typename T> Nd4jLong SpecialMethods<T>::encodeBitmapGeneric(void *vx, Nd4jLong *xShapeInfo, Nd4jLong N, int *dz, float threshold) { auto dx = reinterpret_cast<T *>(vx); Nd4jLong retVal = 0L; #pragma omp parallel for schedule(guided) proc_bind(close) reduction(+:retVal) for (Nd4jLong x = 0; x < N; x += 16) { int byte = 0; int byteId = x / 16 + 4; for (int f = 0; f < 16; f++) { Nd4jLong e = x + f; if (e >= N) continue; T val = dx[e]; T abs = nd4j::math::nd4j_abs<T>(val); int bitId = e % 16; if (abs >= (T) threshold) { byte |= 1 << (bitId); retVal++; if (val < (T) 0.0f) { byte |= 1 << (bitId + 16); dx[e] += threshold; } else { dx[e] -= threshold; } } else if (abs >= (T) threshold / (T) 2.0f && val < (T) 0.0f) { byte |= 1 << (bitId + 16); dx[e] += threshold / 2; retVal++; } } dz[byteId] = byte; } return retVal; } BUILD_SINGLE_TEMPLATE(template class SpecialMethods, , LIBND4J_TYPES); }
#ifndef BOOST_METAPARSE_ALWAYS_C_HPP
#define BOOST_METAPARSE_ALWAYS_C_HPP

// Copyright Abel Sinkovics (abel@sinkovics.hu) 2013.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)

#include <boost/metaparse/v1/always_c.hpp>

namespace boost {
  namespace metaparse {
    // Forwarding header: re-exports the versioned v1 implementation of
    // always_c under the stable boost::metaparse namespace.
    using v1::always_c;
  }
}

#endif
#include "GoRuntime.h"
#include "GoResource.h"

Go::Runtime *Go::Runtime::Instance = nullptr;

// Returns the process-wide runtime, creating it on first use.
// NOTE(review): not thread-safe; assumed to be first called from one thread.
Go::Runtime *Go::Runtime::GetInstance() {
    if (Instance == nullptr)
        Instance = new Runtime();
    return Instance;
}

// Creates the Go resource wrapper for |impl| and registers it under the
// resource's name for later lookup via GetResource().
alt::IResource::Impl *Go::Runtime::CreateImpl(alt::IResource *impl) {
    auto resource = new Go::Resource(this, impl);
    _resources.push_back({{impl->GetName().CStr(), resource}});
    return resource;
}

// Destroys a wrapper previously created by CreateImpl. Non-Go impls are
// ignored (dynamic_cast yields nullptr).
void Go::Runtime::DestroyImpl(alt::IResource::Impl *impl) {
    auto resource = dynamic_cast<Go::Resource *>(impl);
    if (resource != nullptr)
        delete resource;
}

// Linear search for a resource by name; returns nullptr when not found.
alt::IResource::Impl *Go::Runtime::GetResource(const std::string &name) {
    for (auto &resource: _resources) {
        if (resource.find(name) != resource.end()) {
            return resource[name];
        }
    }
    return nullptr;
}

// Converts an array of CustomData (type tag + opaque IMValue pointer) coming
// from the Go side into an alt::MValueArgs list. Unknown type tags are
// silently skipped, as before.
alt::MValueArgs Go::Runtime::CreateMValueArgs(CustomData *MValues, unsigned long long size) {
    alt::MValueArgs args;
    for (unsigned long long i = 0; i < size; ++i) {
        switch (static_cast<alt::IMValue::Type>(MValues[i].Type)) {
        case alt::IMValue::Type::STRING:
            args.Push(reinterpret_cast<alt::IMValueString *>(MValues[i].mValue));
            break;
        case alt::IMValue::Type::INT:
            args.Push(reinterpret_cast<alt::IMValueInt *>(MValues[i].mValue));
            break;
        case alt::IMValue::Type::UINT:
            args.Push(reinterpret_cast<alt::IMValueUInt *>(MValues[i].mValue));
            break;
        case alt::IMValue::Type::DOUBLE:
            args.Push(reinterpret_cast<alt::IMValueDouble *>(MValues[i].mValue));
            break;
        case alt::IMValue::Type::BOOL:
            args.Push(reinterpret_cast<alt::IMValueBool *>(MValues[i].mValue));
            break;
        case alt::IMValue::Type::RGBA:
            args.Push(reinterpret_cast<alt::IMValueRGBA *>(MValues[i].mValue));
            break;
        case alt::IMValue::Type::VECTOR2:
            args.Push(reinterpret_cast<alt::IMValueVector2 *>(MValues[i].mValue));
            break;
        case alt::IMValue::Type::VECTOR3:
            args.Push(reinterpret_cast<alt::IMValueVector3 *>(MValues[i].mValue));
            break;
        case alt::IMValue::Type::BYTE_ARRAY:
            args.Push(reinterpret_cast<alt::IMValueByteArray *>(MValues[i].mValue));
            break;
        case alt::IMValue::Type::BASE_OBJECT:
            args.Push(reinterpret_cast<alt::IMValueBaseObject *>(MValues[i].mValue));
            break;
        case alt::IMValue::Type::FUNCTION:
            // FIX: previously cast to IMValueBool* (copy/paste error).
            args.Push(reinterpret_cast<alt::IMValueFunction *>(MValues[i].mValue));
            break;
        case alt::IMValue::Type::LIST:
            args.Push(reinterpret_cast<alt::IMValueList *>(MValues[i].mValue));
            break;
        case alt::IMValue::Type::DICT:
            args.Push(reinterpret_cast<alt::IMValueDict *>(MValues[i].mValue));
            break;
        }
    }
    return args;
}

// Recursively converts a rapidjson value into an alt MValue.
// Fixes over the previous version:
//  - kArrayType converts each element; previously it re-converted the array
//    value itself, recursing infinitely.
//  - kObjectType builds an MValueDict; previously it broke out of the switch
//    and fell off the end of a value-returning function (undefined behavior).
//  - kNumberType selects Int/UInt/Double from the actual number kind;
//    rapidjson's GetInt() is invalid on non-int values (e.g. doubles).
alt::RefBase<alt::RefStore<alt::IMValue>> Go::Runtime::CreateMValueFromJSONValue(rapidjson::Value &value) {
    switch (value.GetType()) {
    case rapidjson::kNullType:
        return alt::ICore::Instance().CreateMValueNone();
    case rapidjson::kFalseType:
    case rapidjson::kTrueType:
        return alt::ICore::Instance().CreateMValueBool(value.GetBool());
    case rapidjson::kObjectType: {
        auto dict = alt::ICore::Instance().CreateMValueDict();
        for (auto member = value.MemberBegin(); member != value.MemberEnd(); ++member)
            dict->Set(member->name.GetString(), CreateMValueFromJSONValue(member->value));
        return dict;
    }
    case rapidjson::kArrayType: {
        alt::RefBase<alt::RefStore<alt::IMValueList>> list = alt::ICore::Instance().CreateMValueList(
                value.GetArray().Size());
        for (rapidjson::SizeType i = 0; i < value.GetArray().Size(); ++i)
            list->Set(i, CreateMValueFromJSONValue(value[i]));
        return list;
    }
    case rapidjson::kStringType:
        return alt::ICore::Instance().CreateMValueString(value.GetString());
    case rapidjson::kNumberType:
        if (value.IsInt())
            return alt::ICore::Instance().CreateMValueInt(value.GetInt());
        if (value.IsInt64())
            return alt::ICore::Instance().CreateMValueInt(value.GetInt64());
        if (value.IsUint64())
            return alt::ICore::Instance().CreateMValueUInt(value.GetUint64());
        return alt::ICore::Instance().CreateMValueDouble(value.GetDouble());
    }
    // Unreachable for well-formed JSON, but keeps the function from falling
    // off the end should rapidjson ever grow a new value kind.
    return alt::ICore::Instance().CreateMValueNone();
}

// Wraps an alt entity ref into the plain C Entity struct handed to Go.
// e is value-initialized so Type/Ptr are defined even for empty refs
// (previously Type was left uninitialized on the empty path).
Entity Go::Runtime::GetEntity(alt::Ref<alt::IEntity> entity) {
    Entity e{};
    if (!entity.IsEmpty()) {
        auto entityType = entity->GetType();
        e.Type = static_cast<unsigned char>(entityType);
        switch (entityType) {
        case alt::IEntity::Type::PLAYER:
            e.Ptr = entity.As<alt::IPlayer>().Get();
            break;
        case alt::IEntity::Type::VEHICLE:
            e.Ptr = entity.As<alt::IVehicle>().Get();
            break;
        }
    } else {
        e.Ptr = nullptr;
    }
    return e;
}
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "xwalk/runtime/browser/android/net/android_protocol_handler.h" #include <string> #include "base/android/jni_android.h" #include "base/android/jni_helper.h" #include "base/android/jni_string.h" #include "base/strings/string_util.h" #include "content/public/common/url_constants.h" #include "jni/AndroidProtocolHandler_jni.h" #include "net/base/io_buffer.h" #include "net/base/mime_util.h" #include "net/base/net_errors.h" #include "net/base/net_util.h" #include "net/http/http_util.h" #include "net/url_request/protocol_intercept_job_factory.h" #include "net/url_request/url_request.h" #include "url/gurl.h" #include "xwalk/runtime/browser/android/net/android_stream_reader_url_request_job.h" #include "xwalk/runtime/browser/android/net/input_stream_impl.h" #include "xwalk/runtime/browser/android/net/url_constants.h" using base::android::AttachCurrentThread; using base::android::ClearException; using base::android::ConvertUTF8ToJavaString; using base::android::ScopedJavaGlobalRef; using base::android::ScopedJavaLocalRef; using xwalk::InputStream; using xwalk::InputStreamImpl; namespace { // Override resource context for reading resource and asset files. Used for // testing. 
// Weak ref to the Java Context used for resource queries; NULL means "use the
// application context" (see GetResourceContext further below).
JavaObjectWeakGlobalRef* g_resource_context = NULL;

// Replaces the test-override context, freeing any previous override.
void ResetResourceContext(JavaObjectWeakGlobalRef* ref) {
  if (g_resource_context)
    delete g_resource_context;
  g_resource_context = ref;
}

// Unique key for tagging URLRequests that already failed once; the address of
// the variable itself serves as the key.
void* kPreviouslyFailedKey = &kPreviouslyFailedKey;

void MarkRequestAsFailed(net::URLRequest* request) {
  request->SetUserData(kPreviouslyFailedKey,
                       new base::SupportsUserData::Data());
}

bool HasRequestPreviouslyFailed(net::URLRequest* request) {
  return request->GetUserData(kPreviouslyFailedKey) != NULL;
}

// Delegate that answers stream/mime-type/charset queries for
// AndroidStreamReaderURLRequestJob by calling into the Java
// AndroidProtocolHandler class over JNI.
class AndroidStreamReaderURLRequestJobDelegateImpl
    : public AndroidStreamReaderURLRequestJob::Delegate {
 public:
  AndroidStreamReaderURLRequestJobDelegateImpl();

  virtual scoped_ptr<InputStream> OpenInputStream(
      JNIEnv* env, const GURL& url) OVERRIDE;

  virtual void OnInputStreamOpenFailed(net::URLRequest* request,
                                       bool* restart) OVERRIDE;

  virtual bool GetMimeType(JNIEnv* env,
                           net::URLRequest* request,
                           InputStream* stream,
                           std::string* mime_type) OVERRIDE;

  virtual bool GetCharset(JNIEnv* env,
                          net::URLRequest* request,
                          InputStream* stream,
                          std::string* charset) OVERRIDE;

  virtual bool GetPackageName(JNIEnv* env, std::string* name) OVERRIDE;

  virtual ~AndroidStreamReaderURLRequestJobDelegateImpl();
};

// Shared base for the scheme-specific handlers below: creates a stream-reader
// job for any request its subclass claims via CanHandleRequest().
class AndroidProtocolHandlerBase
    : public net::URLRequestJobFactory::ProtocolHandler {
 public:
  virtual net::URLRequestJob* MaybeCreateJob(
      net::URLRequest* request,
      net::NetworkDelegate* network_delegate) const OVERRIDE;

  virtual bool CanHandleRequest(const net::URLRequest* request) const = 0;
};

// Handler for file:// URLs that point into the APK's assets or resources.
class AssetFileProtocolHandler : public AndroidProtocolHandlerBase {
 public:
  AssetFileProtocolHandler();
  virtual ~AssetFileProtocolHandler() OVERRIDE;
  virtual bool CanHandleRequest(const net::URLRequest* request) const OVERRIDE;

 private:
  // file:///android_asset/
  const std::string asset_prefix_;
  // file:///android_res/
  const std::string resource_prefix_;
};

// Protocol handler for app:// scheme requests.
class AppSchemeProtocolHandler : public AndroidProtocolHandlerBase {
 public:
  AppSchemeProtocolHandler();
  virtual bool CanHandleRequest(const net::URLRequest* request) const OVERRIDE;
};

// Protocol handler for content:// scheme requests.
class ContentSchemeProtocolHandler : public AndroidProtocolHandlerBase {
 public:
  ContentSchemeProtocolHandler();
  virtual bool CanHandleRequest(const net::URLRequest* request) const OVERRIDE;
};

// Returns the Java Context for resource/asset resolution: the test override
// when one is installed, otherwise the application context.
static ScopedJavaLocalRef<jobject> GetResourceContext(JNIEnv* env) {
  if (g_resource_context)
    return g_resource_context->get(env);
  ScopedJavaLocalRef<jobject> context;
  // We have to reset as GetApplicationContext() returns a jobject with a
  // global ref. The constructor that takes a jobject would expect a local ref
  // and would assert.
  context.Reset(env, base::android::GetApplicationContext());
  return context;
}

// AndroidStreamReaderURLRequestJobDelegateImpl -------------------------------

AndroidStreamReaderURLRequestJobDelegateImpl::
    AndroidStreamReaderURLRequestJobDelegateImpl() {}

AndroidStreamReaderURLRequestJobDelegateImpl::
    ~AndroidStreamReaderURLRequestJobDelegateImpl() {
}

// Opens a Java InputStream for |url| via the AndroidProtocolHandler Java
// class; returns an empty scoped_ptr when opening fails.
scoped_ptr<InputStream>
AndroidStreamReaderURLRequestJobDelegateImpl::OpenInputStream(
    JNIEnv* env, const GURL& url) {
  DCHECK(url.is_valid());
  DCHECK(env);

  // Open the input stream.
  ScopedJavaLocalRef<jstring> jurl = ConvertUTF8ToJavaString(env, url.spec());
  ScopedJavaLocalRef<jobject> stream = xwalk::Java_AndroidProtocolHandler_open(
      env, GetResourceContext(env).obj(), jurl.obj());

  // Check and clear pending exceptions.
  if (ClearException(env) || stream.is_null()) {
    DLOG(ERROR) << "Unable to open input stream for Android URL";
    return scoped_ptr<InputStream>();
  }
  return make_scoped_ptr<InputStream>(new InputStreamImpl(stream));
}

// Marks the request failed and asks the job to restart it, so the next
// protocol handler in line gets a chance (see MaybeCreateJob below).
void AndroidStreamReaderURLRequestJobDelegateImpl::OnInputStreamOpenFailed(
    net::URLRequest* request, bool* restart) {
  DCHECK(!HasRequestPreviouslyFailed(request));
  MarkRequestAsFailed(request);
  *restart = true;
}

bool AndroidStreamReaderURLRequestJobDelegateImpl::GetMimeType(
    JNIEnv* env,
    net::URLRequest* request,
    xwalk::InputStream* stream,
    std::string* mime_type) {
  DCHECK(env);
  DCHECK(request);
  DCHECK(mime_type);

  // Query the mime type from the Java side. It is possible for the query to
  // fail, as the mime type cannot be determined for all supported schemes.
  ScopedJavaLocalRef<jstring> url =
      ConvertUTF8ToJavaString(env, request->url().spec());
  const InputStreamImpl* stream_impl =
      InputStreamImpl::FromInputStream(stream);
  ScopedJavaLocalRef<jstring> returned_type =
      xwalk::Java_AndroidProtocolHandler_getMimeType(
          env, GetResourceContext(env).obj(),
          stream_impl->jobj(), url.obj());
  if (ClearException(env) || returned_type.is_null())
    return false;

  *mime_type = base::android::ConvertJavaStringToUTF8(returned_type);
  return true;
}

// Charset detection is not implemented; callers fall back to defaults.
bool AndroidStreamReaderURLRequestJobDelegateImpl::GetCharset(
    JNIEnv* env,
    net::URLRequest* request,
    xwalk::InputStream* stream,
    std::string* charset) {
  // TODO(shouqun): We should probably be getting this from the managed side.
  return false;
}

bool AndroidStreamReaderURLRequestJobDelegateImpl::GetPackageName(
    JNIEnv* env,
    std::string* name) {
  ScopedJavaLocalRef<jstring> returned_name =
      xwalk::Java_AndroidProtocolHandler_getPackageName(
          env, GetResourceContext(env).obj());

  if (ClearException(env) || returned_name.is_null())
    return false;

  *name = base::android::ConvertJavaStringToUTF8(returned_name);
  return true;
}

// AndroidProtocolHandlerBase -------------------------------------------------

net::URLRequestJob* AndroidProtocolHandlerBase::MaybeCreateJob(
    net::URLRequest* request, net::NetworkDelegate* network_delegate) const {
  if (!CanHandleRequest(request))
    return NULL;

  // For WebViewClassic compatibility this job can only accept URLs that can be
  // opened. URLs that cannot be opened should be resolved by the next handler.
  //
  // If a request is initially handled here but the job fails due to it being
  // unable to open the InputStream for that request the request is marked as
  // previously failed and restarted.
  // Restarting a request involves creating a new job for that request. This
  // handler will ignore requests know to have previously failed to 1) prevent
  // an infinite loop, 2) ensure that the next handler in line gets the
  // opportunity to create a job for the request.
  if (HasRequestPreviouslyFailed(request))
    return NULL;

  scoped_ptr<AndroidStreamReaderURLRequestJobDelegateImpl> reader_delegate(
      new AndroidStreamReaderURLRequestJobDelegateImpl());
  return new AndroidStreamReaderURLRequestJob(
      request,
      network_delegate,
      reader_delegate.PassAs<AndroidStreamReaderURLRequestJob::Delegate>());
}

// AssetFileProtocolHandler ---------------------------------------------------

AssetFileProtocolHandler::AssetFileProtocolHandler()
    : asset_prefix_(std::string(chrome::kFileScheme) +
                    std::string(content::kStandardSchemeSeparator) +
                    xwalk::kAndroidAssetPath),
      resource_prefix_(std::string(chrome::kFileScheme) +
                       std::string(content::kStandardSchemeSeparator) +
                       xwalk::kAndroidResourcePath) {
}

AssetFileProtocolHandler::~AssetFileProtocolHandler() {
}

// Accepts only file:// URLs under the asset or resource prefixes.
bool AssetFileProtocolHandler::CanHandleRequest(
    const net::URLRequest* request) const {
  if (!request->url().SchemeIsFile())
    return false;
  const std::string& url = request->url().spec();
  if (!StartsWithASCII(url, asset_prefix_, /*case_sensitive=*/ true) &&
      !StartsWithASCII(url, resource_prefix_, /*case_sensitive=*/ true)) {
    return false;
  }
  return true;
}

// ContentSchemeProtocolHandler
ContentSchemeProtocolHandler::ContentSchemeProtocolHandler() {
}

bool ContentSchemeProtocolHandler::CanHandleRequest(
    const net::URLRequest* request) const {
  return request->url().SchemeIs(xwalk::kContentScheme);
}

// AppSchemeProtocolHandler
AppSchemeProtocolHandler::AppSchemeProtocolHandler() {
}

bool AppSchemeProtocolHandler::CanHandleRequest(
    const net::URLRequest* request) const {
  return request->url().SchemeIs(xwalk::kAppScheme);
}

}  // namespace

namespace xwalk {

bool RegisterAndroidProtocolHandler(JNIEnv* env) {
  return RegisterNativesImpl(env);
}

// static
scoped_ptr<net::URLRequestJobFactory::ProtocolHandler>
CreateContentSchemeProtocolHandler() {
  return make_scoped_ptr<net::URLRequestJobFactory::ProtocolHandler>(
      new ContentSchemeProtocolHandler());
}

// static
scoped_ptr<net::URLRequestJobFactory::ProtocolHandler>
CreateAssetFileProtocolHandler() {
  return make_scoped_ptr<net::URLRequestJobFactory::ProtocolHandler>(
      new AssetFileProtocolHandler());
}

// static
scoped_ptr<net::URLRequestJobFactory::ProtocolHandler>
CreateAppSchemeProtocolHandler() {
  return make_scoped_ptr<net::URLRequestJobFactory::ProtocolHandler>(
      new AppSchemeProtocolHandler());
}

// Set a context object to be used for resolving resource queries. This can
// be used to override the default application context and redirect all
// resource queries to a specific context object, e.g., for the purposes of
// testing.
//
// |context| should be a android.content.Context instance or NULL to enable
// the use of the standard application context.
static void SetResourceContextForTesting(JNIEnv* env,
                                         jclass /*clazz*/,
                                         jobject context) {
  if (context) {
    ResetResourceContext(new JavaObjectWeakGlobalRef(env, context));
  } else {
    ResetResourceContext(NULL);
  }
}

static jstring GetAndroidAssetPath(JNIEnv* env, jclass /*clazz*/) {
  // OK to release, JNI binding.
  return ConvertUTF8ToJavaString(
      env, xwalk::kAndroidAssetPath).Release();
}

static jstring GetAndroidResourcePath(JNIEnv* env, jclass /*clazz*/) {
  // OK to release, JNI binding.
  return ConvertUTF8ToJavaString(
      env, xwalk::kAndroidResourcePath).Release();
}

}  // namespace xwalk
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/ui/views/location_bar/location_icon_view.h" #include "chrome/browser/ui/views/frame/browser_view.h" #include "chrome/browser/ui/views/location_bar/location_bar_view.h" #include "chrome/test/base/in_process_browser_test.h" #include "components/omnibox/browser/omnibox_edit_model.h" #include "content/public/test/browser_test.h" #include "ui/views/animation/ink_drop.h" #include "ui/views/animation/test/ink_drop_host_view_test_api.h" // TODO (spqchan): Refine tests. See crbug.com/770873. class LocationIconViewBrowserTest : public InProcessBrowserTest { public: LocationIconViewBrowserTest() {} ~LocationIconViewBrowserTest() override {} protected: void SetUpOnMainThread() override { gfx::FontList font_list; BrowserView* browser_view = BrowserView::GetBrowserViewForBrowser(browser()); location_bar_ = browser_view->GetLocationBarView(); icon_view_ = std::make_unique<LocationIconView>( font_list, location_bar_, location_bar_, browser()->profile()); } LocationBarView* location_bar() const { return location_bar_; } LocationIconView* icon_view() const { return icon_view_.get(); } private: LocationBarView* location_bar_; std::unique_ptr<LocationIconView> icon_view_; DISALLOW_COPY_AND_ASSIGN(LocationIconViewBrowserTest); }; // Check to see if the InkDropMode is off when the omnibox is editing. // Otherwise, it should be on. 
// While input is in progress in the omnibox the icon's ink drop must be
// disabled; once input stops, it must be re-enabled.
IN_PROC_BROWSER_TEST_F(LocationIconViewBrowserTest, InkDropMode) {
  OmniboxEditModel* model = location_bar()->GetOmniboxView()->model();

  // Editing state: ink drop off.
  model->SetInputInProgress(true);
  icon_view()->Update(/*suppress_animations=*/true);
  EXPECT_EQ(views::InkDropHost::InkDropMode::OFF,
            views::test::InkDropHostTestApi(views::InkDrop::Get(icon_view()))
                .ink_drop_mode());

  // Non-editing state: ink drop back on.
  model->SetInputInProgress(false);
  icon_view()->Update(/*suppress_animations=*/true);
  EXPECT_EQ(views::InkDropHost::InkDropMode::ON,
            views::test::InkDropHostTestApi(views::InkDrop::Get(icon_view()))
                .ink_drop_mode());
}
/// steam_glue.cpp #include "steam_glue.h" CSteamAPIContext SteamAPI; uint32 steam_app_id = 0; CSteamID steam_local_id; CSteamID steam_lobby_current; // Called by GM on DLL init dllx double RegisterCallbacks(void* f1, void* f2, void* f3, void* f4) { gml_event_perform_async = (gml_event_perform_async_t)f1; gml_ds_map_create_ext = (gml_ds_map_create_ext_t)f2; gml_ds_map_set_double = (gml_ds_map_set_double_t)f3; gml_ds_map_set_string = (gml_ds_map_set_string_t)f4; return 0; }
/*
 * Copyright 2010 Stephan Aßmus <superstippi@gmx.de>
 * Copyright 2019, Haiku, Inc.
 * All rights reserved. Distributed under the terms of the MIT License.
 */

#include "SettingsWindow.h"

#include <Button.h>
#include <CheckBox.h>
#include <ControlLook.h>
#include <FilePanel.h>
#include <GridLayoutBuilder.h>
#include <GroupLayout.h>
#include <GroupLayoutBuilder.h>
#include <LayoutBuilder.h>
#include <Locale.h>
#include <MenuItem.h>
#include <MenuField.h>
#include <Message.h>
#include <PopUpMenu.h>
#include <ScrollView.h>
#include <SeparatorView.h>
#include <SpaceLayoutItem.h>
#include <Spinner.h>
#include <TabView.h>
#include <TextControl.h>
#include <debugger.h>

#include <stdio.h>
#include <stdlib.h>

#include "BrowserApp.h"
#include "BrowsingHistory.h"
#include "BrowserWindow.h"
#include "FontSelectionView.h"
#include "SettingsKeys.h"
#include "SettingsMessage.h"
#include "WebSettings.h"

#undef B_TRANSLATION_CONTEXT
#define B_TRANSLATION_CONTEXT "Settings Window"

// Message 'what' codes for every control in the settings window.
enum {
	MSG_APPLY							= 'aply',
	MSG_CANCEL							= 'cncl',
	MSG_REVERT							= 'rvrt',

	MSG_START_PAGE_CHANGED				= 'hpch',
	MSG_SEARCH_PAGE_CHANGED				= 'spch',
	MSG_DOWNLOAD_FOLDER_CHANGED			= 'dnfc',
	MSG_NEW_WINDOWS_BEHAVIOR_CHANGED	= 'nwbc',
	MSG_NEW_TABS_BEHAVIOR_CHANGED		= 'ntbc',
	MSG_START_UP_BEHAVIOR_CHANGED		= 'subc',
	MSG_HISTORY_MENU_DAYS_CHANGED		= 'digm',
	MSG_TAB_DISPLAY_BEHAVIOR_CHANGED	= 'tdbc',
	MSG_AUTO_HIDE_INTERFACE_BEHAVIOR_CHANGED	= 'ahic',
	MSG_AUTO_HIDE_POINTER_BEHAVIOR_CHANGED	= 'ahpc',
	MSG_SHOW_HOME_BUTTON_CHANGED		= 'shbc',

	MSG_STANDARD_FONT_CHANGED			= 'stfc',
	MSG_SERIF_FONT_CHANGED				= 'sefc',
	MSG_SANS_SERIF_FONT_CHANGED			= 'ssfc',
	MSG_FIXED_FONT_CHANGED				= 'ffch',

	MSG_STANDARD_FONT_SIZE_SELECTED		= 'sfss',
	MSG_FIXED_FONT_SIZE_SELECTED		= 'ffss',

	MSG_USE_PROXY_CHANGED				= 'upsc',
	MSG_PROXY_ADDRESS_CHANGED			= 'psac',
	MSG_PROXY_PORT_CHANGED				= 'pspc',
	MSG_USE_PROXY_AUTH_CHANGED			= 'upsa',
	MSG_PROXY_USERNAME_CHANGED			= 'psuc',
	MSG_PROXY_PASSWORD_CHANGED			= 'pswc',

	MSG_CHOOSE_DOWNLOAD_FOLDER			= 'swop',
	MSG_HANDLE_DOWNLOAD_FOLDER			= 'oprs',
};

// Fallback size for the standard and fixed web fonts.
static const int32 kDefaultFontSize = 14;


// Builds the whole window: button row, tab view with the three settings
// pages, then loads the stored settings, applies them to WebKit, and starts
// out hidden (Hide()+Show() leaves the window constructed but invisible).
SettingsWindow::SettingsWindow(BRect frame, SettingsMessage* settings)
	:
	BWindow(frame, B_TRANSLATE("Settings"), B_TITLED_WINDOW_LOOK,
		B_NORMAL_WINDOW_FEEL, B_AUTO_UPDATE_SIZE_LIMITS
			| B_ASYNCHRONOUS_CONTROLS | B_NOT_ZOOMABLE),
	fSettings(settings)
{
	fApplyButton = new BButton(B_TRANSLATE("Apply"), new BMessage(MSG_APPLY));
	fCancelButton = new BButton(B_TRANSLATE("Cancel"),
		new BMessage(MSG_CANCEL));
	fRevertButton = new BButton(B_TRANSLATE("Revert"),
		new BMessage(MSG_REVERT));

	fOpenFilePanel = NULL;

	float spacing = be_control_look->DefaultItemSpacing();

	BTabView* tabView = new BTabView("settings pages", B_WIDTH_FROM_LABEL);
	tabView->SetBorder(B_NO_BORDER);

	BLayoutBuilder::Group<>(this, B_VERTICAL, 0)
		.SetInsets(0, B_USE_DEFAULT_SPACING, 0, B_USE_WINDOW_SPACING)
		.Add(tabView)
		.Add(new BSeparatorView(B_HORIZONTAL))
		.AddGroup(B_HORIZONTAL)
			.SetInsets(B_USE_WINDOW_SPACING, B_USE_DEFAULT_SPACING,
				B_USE_WINDOW_SPACING, 0)
			.Add(fRevertButton)
			.AddGlue()
			.Add(fCancelButton)
			.Add(fApplyButton);

	tabView->AddTab(_CreateGeneralPage(spacing));
	tabView->AddTab(_CreateFontsPage(spacing));
	tabView->AddTab(_CreateProxyPage(spacing));

	// The font selection views are handlers and need to be attached manually.
	_SetupFontSelectionView(fStandardFontView,
		new BMessage(MSG_STANDARD_FONT_CHANGED));
	_SetupFontSelectionView(fSerifFontView,
		new BMessage(MSG_SERIF_FONT_CHANGED));
	_SetupFontSelectionView(fSansSerifFontView,
		new BMessage(MSG_SANS_SERIF_FONT_CHANGED));
	_SetupFontSelectionView(fFixedFontView,
		new BMessage(MSG_FIXED_FONT_CHANGED));

	fApplyButton->MakeDefault(true);

	if (!frame.IsValid())
		CenterOnScreen();

	// load settings from disk
	_RevertSettings();
	// apply to WebKit
	_ApplySettings();

	// Start hidden
	Hide();
	Show();
}


// The font selection views were added as handlers, not children, so they
// must be removed and deleted explicitly.
SettingsWindow::~SettingsWindow()
{
	RemoveHandler(fStandardFontView);
	delete fStandardFontView;
	RemoveHandler(fSerifFontView);
	delete fSerifFontView;
	RemoveHandler(fSansSerifFontView);
	delete fSansSerifFontView;
	RemoveHandler(fFixedFontView);
	delete fFixedFontView;
	delete fOpenFilePanel;
}

// Dispatches all control messages: apply/revert/cancel, file-panel handling,
// font-size propagation, and re-validation of the button states whenever any
// setting control changes.
void
SettingsWindow::MessageReceived(BMessage* message)
{
	switch (message->what) {
		case MSG_APPLY:
			_ApplySettings();
			break;
		case MSG_CANCEL:
			_RevertSettings();
			PostMessage(B_QUIT_REQUESTED);
			break;
		case MSG_REVERT:
			_RevertSettings();
			break;

		case MSG_CHOOSE_DOWNLOAD_FOLDER:
			_ChooseDownloadFolder(message);
			break;
		case MSG_HANDLE_DOWNLOAD_FOLDER:
			_HandleDownloadPanelResult(fOpenFilePanel, message);
			break;

		case MSG_STANDARD_FONT_SIZE_SELECTED:
		{
			// The standard size also drives the serif and sans-serif previews.
			int32 size = fStandardSizesSpinner->Value();
			fStandardFontView->SetSize(size);
			fSerifFontView->SetSize(size);
			fSansSerifFontView->SetSize(size);
			_ValidateControlsEnabledStatus();
			break;
		}
		case MSG_FIXED_FONT_SIZE_SELECTED:
		{
			int32 size = fFixedSizesSpinner->Value();
			fFixedFontView->SetSize(size);
			_ValidateControlsEnabledStatus();
			break;
		}

		case MSG_START_PAGE_CHANGED:
		case MSG_SEARCH_PAGE_CHANGED:
		case MSG_DOWNLOAD_FOLDER_CHANGED:
		case MSG_START_UP_BEHAVIOR_CHANGED:
		case MSG_NEW_WINDOWS_BEHAVIOR_CHANGED:
		case MSG_NEW_TABS_BEHAVIOR_CHANGED:
		case MSG_HISTORY_MENU_DAYS_CHANGED:
		case MSG_TAB_DISPLAY_BEHAVIOR_CHANGED:
		case MSG_AUTO_HIDE_INTERFACE_BEHAVIOR_CHANGED:
		case MSG_AUTO_HIDE_POINTER_BEHAVIOR_CHANGED:
		case MSG_SHOW_HOME_BUTTON_CHANGED:
		case MSG_STANDARD_FONT_CHANGED:
		case MSG_SERIF_FONT_CHANGED:
		case MSG_SANS_SERIF_FONT_CHANGED:
		case MSG_FIXED_FONT_CHANGED:
		case MSG_USE_PROXY_CHANGED:
		case MSG_PROXY_ADDRESS_CHANGED:
		case MSG_PROXY_PORT_CHANGED:
		case MSG_USE_PROXY_AUTH_CHANGED:
		case MSG_PROXY_USERNAME_CHANGED:
		case MSG_PROXY_PASSWORD_CHANGED:
			// TODO: Some settings could change live, some others not?
			_ValidateControlsEnabledStatus();
			break;

		default:
			BWindow::MessageReceived(message);
			break;
	}
}


// The window only hides on close so it can be re-shown cheaply.
bool
SettingsWindow::QuitRequested()
{
	if (!IsHidden())
		Hide();
	return false;
}


void
SettingsWindow::Show()
{
	// When showing the window, this is always the
	// point to which we can revert the settings.
	_RevertSettings();
	BWindow::Show();
}


// #pragma mark - private


// Builds the "General" tab: start/search/download locations, window/tab
// policies, history retention and interface checkboxes.
BView*
SettingsWindow::_CreateGeneralPage(float spacing)
{
	fStartPageControl = new BTextControl("start page",
		B_TRANSLATE("Start page:"), "",
		new BMessage(MSG_START_PAGE_CHANGED));
	fStartPageControl->SetModificationMessage(
		new BMessage(MSG_START_PAGE_CHANGED));
	fStartPageControl->SetText(
		fSettings->GetValue(kSettingsKeyStartPageURL, kDefaultStartPageURL));

	fSearchPageControl = new BTextControl("search page",
		B_TRANSLATE("Search page:"), "",
		new BMessage(MSG_SEARCH_PAGE_CHANGED));
	fSearchPageControl->SetModificationMessage(
		new BMessage(MSG_SEARCH_PAGE_CHANGED));
	fSearchPageControl->SetToolTip(B_TRANSLATE("%s - Search term"));
	BString searchURL = fSettings->GetValue(kSettingsKeySearchPageURL,
		kDefaultSearchPageURL);
	if (searchURL == "http://www.google.com") {
		// Migrate old settings files.
		searchURL = kDefaultSearchPageURL;
		fSettings->SetValue(kSettingsKeySearchPageURL, kDefaultSearchPageURL);
	}
	fSearchPageControl->SetText(searchURL);

	fDownloadFolderControl = new BTextControl("download folder",
		B_TRANSLATE("Download folder:"), "",
		new BMessage(MSG_DOWNLOAD_FOLDER_CHANGED));
	fDownloadFolderControl->SetModificationMessage(
		new BMessage(MSG_DOWNLOAD_FOLDER_CHANGED));
	fDownloadFolderControl->SetText(
		fSettings->GetValue(kSettingsKeyDownloadPath, kDefaultDownloadPath));

	fStartUpBehaviorResumePriorSession = new BMenuItem(
		B_TRANSLATE("Resume prior session"),
		new BMessage(MSG_START_UP_BEHAVIOR_CHANGED));
	fStartUpBehaviorStartNewSession = new BMenuItem(
		B_TRANSLATE("Start new session"),
		new BMessage(MSG_START_UP_BEHAVIOR_CHANGED));

	fNewWindowBehaviorOpenHomeItem = new BMenuItem(
		B_TRANSLATE("Open start page"),
		new BMessage(MSG_NEW_WINDOWS_BEHAVIOR_CHANGED));
	fNewWindowBehaviorOpenSearchItem = new BMenuItem(
		B_TRANSLATE("Open search page"),
		new BMessage(MSG_NEW_WINDOWS_BEHAVIOR_CHANGED));
	fNewWindowBehaviorOpenBlankItem = new BMenuItem(
		B_TRANSLATE("Open blank page"), new
BMessage(MSG_NEW_WINDOWS_BEHAVIOR_CHANGED));

	fNewTabBehaviorCloneCurrentItem = new BMenuItem(
		B_TRANSLATE("Clone current page"),
		new BMessage(MSG_NEW_TABS_BEHAVIOR_CHANGED));
	fNewTabBehaviorOpenHomeItem = new BMenuItem(
		B_TRANSLATE("Open start page"),
		new BMessage(MSG_NEW_TABS_BEHAVIOR_CHANGED));
	fNewTabBehaviorOpenSearchItem = new BMenuItem(
		B_TRANSLATE("Open search page"),
		new BMessage(MSG_NEW_TABS_BEHAVIOR_CHANGED));
	fNewTabBehaviorOpenBlankItem = new BMenuItem(
		B_TRANSLATE("Open blank page"),
		new BMessage(MSG_NEW_TABS_BEHAVIOR_CHANGED));

	fChooseButton = new BButton(B_TRANSLATE("Browse" B_UTF8_ELLIPSIS),
		new BMessage(MSG_CHOOSE_DOWNLOAD_FOLDER));

	// Defaults for the pop-up menus; _RevertSettings() overrides these from
	// the stored settings later.
	fNewWindowBehaviorOpenHomeItem->SetMarked(true);
	fNewTabBehaviorOpenBlankItem->SetMarked(true);
	fStartUpBehaviorResumePriorSession->SetMarked(true);

	BPopUpMenu* startUpBehaviorMenu = new BPopUpMenu("Start up");
	startUpBehaviorMenu->AddItem(fStartUpBehaviorResumePriorSession);
	startUpBehaviorMenu->AddItem(fStartUpBehaviorStartNewSession);
	fStartUpBehaviorMenu = new BMenuField("start up behavior",
		B_TRANSLATE("Start up:"), startUpBehaviorMenu);

	BPopUpMenu* newWindowBehaviorMenu = new BPopUpMenu("New windows");
	newWindowBehaviorMenu->AddItem(fNewWindowBehaviorOpenHomeItem);
	newWindowBehaviorMenu->AddItem(fNewWindowBehaviorOpenSearchItem);
	newWindowBehaviorMenu->AddItem(fNewWindowBehaviorOpenBlankItem);
	fNewWindowBehaviorMenu = new BMenuField("new window behavior",
		B_TRANSLATE("New windows:"), newWindowBehaviorMenu);

	BPopUpMenu* newTabBehaviorMenu = new BPopUpMenu("New tabs");
	newTabBehaviorMenu->AddItem(fNewTabBehaviorOpenBlankItem);
	newTabBehaviorMenu->AddItem(fNewTabBehaviorOpenHomeItem);
	newTabBehaviorMenu->AddItem(fNewTabBehaviorOpenSearchItem);
	newTabBehaviorMenu->AddItem(fNewTabBehaviorCloneCurrentItem);
	fNewTabBehaviorMenu = new BMenuField("new tab behavior",
		B_TRANSLATE("New tabs:"), newTabBehaviorMenu);

	fDaysInHistory = new BSpinner("days in history",
		B_TRANSLATE("Number of days to keep links in History menu:"),
		new BMessage(MSG_HISTORY_MENU_DAYS_CHANGED));
	fDaysInHistory->SetRange(1, 35);
	fDaysInHistory->SetValue(
		BrowsingHistory::DefaultInstance()->MaxHistoryItemAge());

	fShowTabsIfOnlyOnePage = new BCheckBox("show tabs if only one page",
		B_TRANSLATE("Show tabs if only one page is open"),
		new BMessage(MSG_TAB_DISPLAY_BEHAVIOR_CHANGED));
	fShowTabsIfOnlyOnePage->SetValue(B_CONTROL_ON);

	fAutoHideInterfaceInFullscreenMode = new BCheckBox("auto-hide interface",
		B_TRANSLATE("Auto-hide interface in full screen mode"),
		new BMessage(MSG_AUTO_HIDE_INTERFACE_BEHAVIOR_CHANGED));
	fAutoHideInterfaceInFullscreenMode->SetValue(B_CONTROL_OFF);

	fAutoHidePointer = new BCheckBox("auto-hide pointer",
		B_TRANSLATE("Auto-hide mouse pointer"),
		new BMessage(MSG_AUTO_HIDE_POINTER_BEHAVIOR_CHANGED));
	fAutoHidePointer->SetValue(B_CONTROL_OFF);

	fShowHomeButton = new BCheckBox("show home button",
		B_TRANSLATE("Show home button"),
		new BMessage(MSG_SHOW_HOME_BUTTON_CHANGED));
	fShowHomeButton->SetValue(B_CONTROL_ON);

	BView* view = BGroupLayoutBuilder(B_VERTICAL, 0)
		.Add(BGridLayoutBuilder(spacing / 2, spacing / 2)
			.Add(fStartPageControl->CreateLabelLayoutItem(), 0, 0)
			.Add(fStartPageControl->CreateTextViewLayoutItem(), 1, 0)
			.Add(fSearchPageControl->CreateLabelLayoutItem(), 0, 1)
			.Add(fSearchPageControl->CreateTextViewLayoutItem(), 1, 1)
			.Add(fStartUpBehaviorMenu->CreateLabelLayoutItem(), 0, 2)
			.Add(fStartUpBehaviorMenu->CreateMenuBarLayoutItem(), 1, 2)
			.Add(fNewWindowBehaviorMenu->CreateLabelLayoutItem(), 0, 3)
			.Add(fNewWindowBehaviorMenu->CreateMenuBarLayoutItem(), 1, 3)
			.Add(fNewTabBehaviorMenu->CreateLabelLayoutItem(), 0, 4)
			.Add(fNewTabBehaviorMenu->CreateMenuBarLayoutItem(), 1, 4)
			.Add(fDownloadFolderControl->CreateLabelLayoutItem(), 0, 5)
			.Add(fDownloadFolderControl->CreateTextViewLayoutItem(), 1, 5)
			.Add(fChooseButton, 2, 5)
		)
		.Add(BSpaceLayoutItem::CreateVerticalStrut(spacing))
		.Add(new BSeparatorView(B_HORIZONTAL, B_PLAIN_BORDER))
		.Add(BSpaceLayoutItem::CreateVerticalStrut(spacing))
		.Add(fShowTabsIfOnlyOnePage)
		.Add(fAutoHideInterfaceInFullscreenMode)
		.Add(fAutoHidePointer)
		.Add(fShowHomeButton)
		.Add(BSpaceLayoutItem::CreateVerticalStrut(spacing))
		.AddGroup(B_HORIZONTAL)
			.Add(fDaysInHistory)
			.AddGlue()
			.End()
		.AddGlue()
		.SetInsets(B_USE_WINDOW_SPACING, B_USE_WINDOW_SPACING,
			B_USE_WINDOW_SPACING, B_USE_DEFAULT_SPACING)
		.TopView()
	;
	view->SetName(B_TRANSLATE("General"));
	return view;
}


// Builds the "Fonts" tab: the four font selection views with their preview
// boxes plus the two size spinners.
BView*
SettingsWindow::_CreateFontsPage(float spacing)
{
	fStandardFontView = new FontSelectionView("standard",
		B_TRANSLATE("Standard font:"), true, be_plain_font);
	BFont defaultSerifFont = _FindDefaultSerifFont();
	fSerifFontView = new FontSelectionView("serif",
		B_TRANSLATE("Serif font:"), true, &defaultSerifFont);
	fSansSerifFontView = new FontSelectionView("sans serif",
		B_TRANSLATE("Sans serif font:"), true, be_plain_font);
	fFixedFontView = new FontSelectionView("fixed",
		B_TRANSLATE("Fixed font:"), true, be_fixed_font);

	fStandardSizesSpinner = new BSpinner("standard font size",
		B_TRANSLATE("Default standard font size:"),
		new BMessage(MSG_STANDARD_FONT_SIZE_SELECTED));
	fStandardSizesSpinner->SetAlignment(B_ALIGN_RIGHT);

	fFixedSizesSpinner = new BSpinner("fixed font size",
		B_TRANSLATE("Default fixed font size:"),
		new BMessage(MSG_FIXED_FONT_SIZE_SELECTED));
	fFixedSizesSpinner->SetAlignment(B_ALIGN_RIGHT);

	BView* view = BGridLayoutBuilder(spacing / 2, spacing / 2)
		.Add(fStandardFontView->CreateFontsLabelLayoutItem(), 0, 0)
		.Add(fStandardFontView->CreateFontsMenuBarLayoutItem(), 1, 0)
		.Add(fStandardSizesSpinner->CreateLabelLayoutItem(), 2, 0)
		.Add(fStandardSizesSpinner->CreateTextViewLayoutItem(), 3, 0)
		.Add(fStandardFontView->PreviewBox(), 1, 1, 3)
		.Add(fSerifFontView->CreateFontsLabelLayoutItem(), 0, 2)
		.Add(fSerifFontView->CreateFontsMenuBarLayoutItem(), 1, 2)
		.Add(fSerifFontView->PreviewBox(), 1, 3, 3)
		.Add(fSansSerifFontView->CreateFontsLabelLayoutItem(), 0, 4)
		.Add(fSansSerifFontView->CreateFontsMenuBarLayoutItem(), 1, 4)
		.Add(fSansSerifFontView->PreviewBox(),
1, 5, 3)
		.Add(BSpaceLayoutItem::CreateVerticalStrut(spacing / 2), 0, 6, 2)
		.Add(fFixedFontView->CreateFontsLabelLayoutItem(), 0, 7)
		.Add(fFixedFontView->CreateFontsMenuBarLayoutItem(), 1, 7)
		.Add(fFixedSizesSpinner->CreateLabelLayoutItem(), 2, 7)
		.Add(fFixedSizesSpinner->CreateTextViewLayoutItem(), 3, 7)
		.Add(fFixedFontView->PreviewBox(), 1, 8, 3)
		.SetInsets(B_USE_WINDOW_SPACING, B_USE_WINDOW_SPACING,
			B_USE_WINDOW_SPACING, B_USE_DEFAULT_SPACING)
		.View();
	view->SetName(B_TRANSLATE("Fonts"));
	return view;
}


// Builds the "Proxy server" tab: proxy toggle, address/port fields and the
// optional authentication controls.
BView*
SettingsWindow::_CreateProxyPage(float spacing)
{
	fUseProxyCheckBox = new BCheckBox("use proxy",
		B_TRANSLATE("Use proxy server to connect to the internet"),
		new BMessage(MSG_USE_PROXY_CHANGED));
	fUseProxyCheckBox->SetValue(B_CONTROL_ON);

	fProxyAddressControl = new BTextControl("proxy address",
		B_TRANSLATE("Proxy server address:"), "",
		new BMessage(MSG_PROXY_ADDRESS_CHANGED));
	fProxyAddressControl->SetModificationMessage(
		new BMessage(MSG_PROXY_ADDRESS_CHANGED));
	fProxyAddressControl->SetText(
		fSettings->GetValue(kSettingsKeyProxyAddress, ""));

	fProxyPortControl = new BTextControl("proxy port",
		B_TRANSLATE("Proxy server port:"), "",
		new BMessage(MSG_PROXY_PORT_CHANGED));
	fProxyPortControl->SetModificationMessage(
		new BMessage(MSG_PROXY_PORT_CHANGED));
	fProxyPortControl->SetText(
		fSettings->GetValue(kSettingsKeyProxyPort, ""));

	fUseProxyAuthCheckBox = new BCheckBox("use authentication",
		B_TRANSLATE("Proxy server requires authentication"),
		new BMessage(MSG_USE_PROXY_AUTH_CHANGED));
	fUseProxyAuthCheckBox->SetValue(B_CONTROL_ON);

	fProxyUsernameControl = new BTextControl("proxy username",
		B_TRANSLATE("Proxy username:"), "",
		new BMessage(MSG_PROXY_USERNAME_CHANGED));
	fProxyUsernameControl->SetModificationMessage(
		new BMessage(MSG_PROXY_USERNAME_CHANGED));
	fProxyUsernameControl->SetText(
		fSettings->GetValue(kSettingsKeyProxyUsername, ""));

	fProxyPasswordControl = new BTextControl("proxy password",
		B_TRANSLATE("Proxy password:"), "", new
BMessage(MSG_PROXY_PASSWORD_CHANGED));
	fProxyPasswordControl->SetModificationMessage(
		new BMessage(MSG_PROXY_PASSWORD_CHANGED));
	// Mask the password as it is typed.
	fProxyPasswordControl->TextView()->HideTyping(true);
	fProxyPasswordControl->SetText(
		fSettings->GetValue(kSettingsKeyProxyPassword, ""));

	BView* view = BGridLayoutBuilder(spacing / 2, spacing / 2)
		.Add(fUseProxyCheckBox, 0, 0, 2)
		.Add(fProxyAddressControl->CreateLabelLayoutItem(), 0, 1)
		.Add(fProxyAddressControl->CreateTextViewLayoutItem(), 1, 1, 2)
		.Add(fProxyPortControl->CreateLabelLayoutItem(), 0, 2)
		.Add(fProxyPortControl->CreateTextViewLayoutItem(), 1, 2, 2)
		.Add(BSpaceLayoutItem::CreateVerticalStrut(spacing), 0, 3)
		.Add(fUseProxyAuthCheckBox, 0, 4, 2)
		.Add(fProxyUsernameControl->CreateLabelLayoutItem(), 0, 5)
		.Add(fProxyUsernameControl->CreateTextViewLayoutItem(), 1, 5, 2)
		.Add(fProxyPasswordControl->CreateLabelLayoutItem(), 0, 6)
		.Add(fProxyPasswordControl->CreateTextViewLayoutItem(), 1, 6, 2)
		.Add(BSpaceLayoutItem::CreateGlue(), 0, 7)
		.SetInsets(B_USE_WINDOW_SPACING, B_USE_WINDOW_SPACING,
			B_USE_WINDOW_SPACING, B_USE_DEFAULT_SPACING)
		.View();
	view->SetName(B_TRANSLATE("Proxy server"));
	return view;
}


// Attaches a FontSelectionView as a handler of this window and points its
// notification message/target at the window.
void
SettingsWindow::_SetupFontSelectionView(FontSelectionView* view,
	BMessage* message)
{
	AddHandler(view);
	view->AttachedToLooper();
	view->SetMessage(message);
	view->SetTarget(this);
}


// #pragma mark -


// Returns true when any control differs from the stored settings, i.e. when
// Apply/Revert should be enabled.
bool
SettingsWindow::_CanApplySettings() const
{
	bool canApply = false;

	// General settings
	canApply = canApply || (strcmp(fStartPageControl->Text(),
		fSettings->GetValue(kSettingsKeyStartPageURL,
			kDefaultStartPageURL)) != 0);

	canApply = canApply || (strcmp(fSearchPageControl->Text(),
		fSettings->GetValue(kSettingsKeySearchPageURL,
			kDefaultSearchPageURL)) != 0);

	canApply = canApply || (strcmp(fDownloadFolderControl->Text(),
		fSettings->GetValue(kSettingsKeyDownloadPath,
			kDefaultDownloadPath)) != 0);

	canApply = canApply || ((fShowTabsIfOnlyOnePage->Value() == B_CONTROL_ON)
		!= fSettings->GetValue(kSettingsKeyShowTabsIfSinglePageOpen,
true));

	canApply = canApply || (
		(fAutoHideInterfaceInFullscreenMode->Value() == B_CONTROL_ON)
		!= fSettings->GetValue(kSettingsKeyAutoHideInterfaceInFullscreenMode,
			false));

	canApply = canApply || (
		(fAutoHidePointer->Value() == B_CONTROL_ON)
		!= fSettings->GetValue(kSettingsKeyAutoHidePointer, false));

	canApply = canApply || ((fShowHomeButton->Value() == B_CONTROL_ON)
		!= fSettings->GetValue(kSettingsKeyShowHomeButton, true));

	canApply = canApply || (fDaysInHistory->Value()
		!= BrowsingHistory::DefaultInstance()->MaxHistoryItemAge());

	// Start up policy
	canApply = canApply || (_StartUpPolicy()
		!= fSettings->GetValue(kSettingsKeyStartUpPolicy,
			(uint32)ResumePriorSession));

	// New window policy
	canApply = canApply || (_NewWindowPolicy()
		!= fSettings->GetValue(kSettingsKeyNewWindowPolicy,
			(uint32)OpenStartPage));

	// New tab policy
	canApply = canApply || (_NewTabPolicy()
		!= fSettings->GetValue(kSettingsKeyNewTabPolicy,
			(uint32)OpenBlankPage));

	// Font settings
	canApply = canApply || (fStandardFontView->Font()
		!= fSettings->GetValue("standard font", *be_plain_font));

	canApply = canApply || (fSerifFontView->Font()
		!= fSettings->GetValue("serif font", _FindDefaultSerifFont()));

	canApply = canApply || (fSansSerifFontView->Font()
		!= fSettings->GetValue("sans serif font", *be_plain_font));

	canApply = canApply || (fFixedFontView->Font()
		!= fSettings->GetValue("fixed font", *be_fixed_font));

	canApply = canApply || (fStandardSizesSpinner->Value()
		!= fSettings->GetValue("standard font size", kDefaultFontSize));

	canApply = canApply || (fFixedSizesSpinner->Value()
		!= fSettings->GetValue("fixed font size", kDefaultFontSize));

	// Proxy settings
	canApply = canApply || ((fUseProxyCheckBox->Value() == B_CONTROL_ON)
		!= fSettings->GetValue(kSettingsKeyUseProxy, false));

	canApply = canApply || (strcmp(fProxyAddressControl->Text(),
		fSettings->GetValue(kSettingsKeyProxyAddress, "")) != 0);

	canApply = canApply || (_ProxyPort()
		!= fSettings->GetValue(kSettingsKeyProxyPort, (uint32)0));

	canApply
= canApply || ((fUseProxyAuthCheckBox->Value() == B_CONTROL_ON)
		!= fSettings->GetValue(kSettingsKeyUseProxyAuth, false));

	canApply = canApply || (strcmp(fProxyUsernameControl->Text(),
		fSettings->GetValue(kSettingsKeyProxyUsername, "")) != 0);

	canApply = canApply || (strcmp(fProxyPasswordControl->Text(),
		fSettings->GetValue(kSettingsKeyProxyPassword, "")) != 0);

	return canApply;
}


// Writes every control value into fSettings, saves them to disk, and pushes
// the font and proxy configuration into the global BWebSettings.
void
SettingsWindow::_ApplySettings()
{
	// Store general settings
	BrowsingHistory::DefaultInstance()->SetMaxHistoryItemAge(
		(uint32)fDaysInHistory->Value());
	fSettings->SetValue(kSettingsKeyStartPageURL, fStartPageControl->Text());
	fSettings->SetValue(kSettingsKeySearchPageURL, fSearchPageControl->Text());
	fSettings->SetValue(kSettingsKeyDownloadPath,
		fDownloadFolderControl->Text());
	fSettings->SetValue(kSettingsKeyShowTabsIfSinglePageOpen,
		fShowTabsIfOnlyOnePage->Value() == B_CONTROL_ON);
	fSettings->SetValue(kSettingsKeyAutoHideInterfaceInFullscreenMode,
		fAutoHideInterfaceInFullscreenMode->Value() == B_CONTROL_ON);
	fSettings->SetValue(kSettingsKeyAutoHidePointer,
		fAutoHidePointer->Value() == B_CONTROL_ON);
	fSettings->SetValue(kSettingsKeyShowHomeButton,
		fShowHomeButton->Value() == B_CONTROL_ON);

	// New page policies
	fSettings->SetValue(kSettingsKeyStartUpPolicy, _StartUpPolicy());
	fSettings->SetValue(kSettingsKeyNewWindowPolicy, _NewWindowPolicy());
	fSettings->SetValue(kSettingsKeyNewTabPolicy, _NewTabPolicy());

	// Store font settings
	fSettings->SetValue("standard font", fStandardFontView->Font());
	fSettings->SetValue("serif font", fSerifFontView->Font());
	fSettings->SetValue("sans serif font", fSansSerifFontView->Font());
	fSettings->SetValue("fixed font", fFixedFontView->Font());
	int32 standardFontSize = fStandardSizesSpinner->Value();
	int32 fixedFontSize = fFixedSizesSpinner->Value();
	fSettings->SetValue("standard font size", standardFontSize);
	fSettings->SetValue("fixed font size", fixedFontSize);

	// Store proxy settings
	fSettings->SetValue(kSettingsKeyUseProxy,
fUseProxyCheckBox->Value() == B_CONTROL_ON);
	fSettings->SetValue(kSettingsKeyProxyAddress,
		fProxyAddressControl->Text());
	uint32 proxyPort = _ProxyPort();
	fSettings->SetValue(kSettingsKeyProxyPort, proxyPort);
	fSettings->SetValue(kSettingsKeyUseProxyAuth,
		fUseProxyAuthCheckBox->Value() == B_CONTROL_ON);
	fSettings->SetValue(kSettingsKeyProxyUsername,
		fProxyUsernameControl->Text());
	fSettings->SetValue(kSettingsKeyProxyPassword,
		fProxyPasswordControl->Text());

	fSettings->Save();

	// Apply settings to default web page settings.
	BWebSettings::Default()->SetStandardFont(fStandardFontView->Font());
	BWebSettings::Default()->SetSerifFont(fSerifFontView->Font());
	BWebSettings::Default()->SetSansSerifFont(fSansSerifFontView->Font());
	BWebSettings::Default()->SetFixedFont(fFixedFontView->Font());
	BWebSettings::Default()->SetDefaultStandardFontSize(standardFontSize);
	BWebSettings::Default()->SetDefaultFixedFontSize(fixedFontSize);

	if (fUseProxyCheckBox->Value() == B_CONTROL_ON) {
		if (fUseProxyAuthCheckBox->Value() == B_CONTROL_ON) {
			BWebSettings::Default()->SetProxyInfo(fProxyAddressControl->Text(),
				proxyPort, B_PROXY_TYPE_HTTP, fProxyUsernameControl->Text(),
				fProxyPasswordControl->Text());
		} else {
			BWebSettings::Default()->SetProxyInfo(fProxyAddressControl->Text(),
				proxyPort, B_PROXY_TYPE_HTTP, "", "");
		}
	} else
		BWebSettings::Default()->SetProxyInfo();

	// This will find all currently instantiated page settings and apply
	// the default values, unless the page settings have local overrides.
BWebSettings::Default()->Apply();

	_ValidateControlsEnabledStatus();
}


// Reloads every control from the stored settings, discarding pending edits.
void
SettingsWindow::_RevertSettings()
{
	fStartPageControl->SetText(
		fSettings->GetValue(kSettingsKeyStartPageURL, kDefaultStartPageURL));
	fSearchPageControl->SetText(
		fSettings->GetValue(kSettingsKeySearchPageURL, kDefaultSearchPageURL));
	fDownloadFolderControl->SetText(
		fSettings->GetValue(kSettingsKeyDownloadPath, kDefaultDownloadPath));

	fShowTabsIfOnlyOnePage->SetValue(
		fSettings->GetValue(kSettingsKeyShowTabsIfSinglePageOpen, true));
	fAutoHideInterfaceInFullscreenMode->SetValue(
		fSettings->GetValue(kSettingsKeyAutoHideInterfaceInFullscreenMode,
			false));
	fAutoHidePointer->SetValue(
		fSettings->GetValue(kSettingsKeyAutoHidePointer, false));
	fShowHomeButton->SetValue(
		fSettings->GetValue(kSettingsKeyShowHomeButton, true));

	fDaysInHistory->SetValue(
		BrowsingHistory::DefaultInstance()->MaxHistoryItemAge());

	// Start Up policy
	uint32 startUpPolicy = fSettings->GetValue(kSettingsKeyStartUpPolicy,
		(uint32)ResumePriorSession);
	switch (startUpPolicy) {
		default:
		case ResumePriorSession:
			fStartUpBehaviorResumePriorSession->SetMarked(true);
			break;
		case StartNewSession:
			fStartUpBehaviorStartNewSession->SetMarked(true);
			break;
	}

	// New window policy
	uint32 newWindowPolicy = fSettings->GetValue(kSettingsKeyNewWindowPolicy,
		(uint32)OpenStartPage);
	switch (newWindowPolicy) {
		default:
		case OpenStartPage:
			fNewWindowBehaviorOpenHomeItem->SetMarked(true);
			break;
		case OpenSearchPage:
			fNewWindowBehaviorOpenSearchItem->SetMarked(true);
			break;
		case OpenBlankPage:
			fNewWindowBehaviorOpenBlankItem->SetMarked(true);
			break;
	}

	// New tab policy
	uint32 newTabPolicy = fSettings->GetValue(kSettingsKeyNewTabPolicy,
		(uint32)OpenBlankPage);
	switch (newTabPolicy) {
		default:
		case OpenBlankPage:
			fNewTabBehaviorOpenBlankItem->SetMarked(true);
			break;
		case OpenStartPage:
			fNewTabBehaviorOpenHomeItem->SetMarked(true);
			break;
		case OpenSearchPage:
			fNewTabBehaviorOpenSearchItem->SetMarked(true);
			break;
		case CloneCurrentPage:
			fNewTabBehaviorCloneCurrentItem->SetMarked(true);
			break;
	}

	// Font settings
	int32 defaultFontSize = fSettings->GetValue("standard font size",
		kDefaultFontSize);
	int32 defaultFixedFontSize = fSettings->GetValue("fixed font size",
		kDefaultFontSize);
	fStandardSizesSpinner->SetValue(defaultFontSize);
	fFixedSizesSpinner->SetValue(defaultFixedFontSize);
	fStandardFontView->SetFont(fSettings->GetValue("standard font",
		*be_plain_font), defaultFontSize);
	fSerifFontView->SetFont(fSettings->GetValue("serif font",
		_FindDefaultSerifFont()), defaultFontSize);
	fSansSerifFontView->SetFont(fSettings->GetValue("sans serif font",
		*be_plain_font), defaultFontSize);
	fFixedFontView->SetFont(fSettings->GetValue("fixed font",
		*be_fixed_font), defaultFixedFontSize);

	// Proxy settings
	fUseProxyCheckBox->SetValue(fSettings->GetValue(kSettingsKeyUseProxy,
		false));
	fProxyAddressControl->SetText(fSettings->GetValue(kSettingsKeyProxyAddress,
		""));
	BString keyProxyPort;
	keyProxyPort << fSettings->GetValue(kSettingsKeyProxyPort, (uint32)0);
	fProxyPortControl->SetText(keyProxyPort.String());
	fUseProxyAuthCheckBox->SetValue(fSettings->GetValue(
		kSettingsKeyUseProxyAuth, false));
	fProxyUsernameControl->SetText(fSettings->GetValue(
		kSettingsKeyProxyUsername, ""));
	fProxyPasswordControl->SetText(fSettings->GetValue(
		kSettingsKeyProxyPassword, ""));

	_ValidateControlsEnabledStatus();
}


// Lazily creates the directory-only open panel and shows it; the panel
// replies with MSG_HANDLE_DOWNLOAD_FOLDER.
void
SettingsWindow::_ChooseDownloadFolder(const BMessage* message)
{
	if (fOpenFilePanel == NULL) {
		BMessenger target(this);
		fOpenFilePanel = new (std::nothrow) BFilePanel(B_OPEN_PANEL,
			&target, NULL, B_DIRECTORY_NODE);
	}
	BMessage panelMessage(MSG_HANDLE_DOWNLOAD_FOLDER);
	fOpenFilePanel->SetMessage(&panelMessage);
	fOpenFilePanel->Show();
}


// Copies the folder picked in the panel into the download-folder text
// control. The panel argument is currently unused.
void
SettingsWindow::
	_HandleDownloadPanelResult(BFilePanel* panel, const BMessage* message)
{
	entry_ref ref;
	if (message->FindRef("refs", 0, &ref) == B_OK)
	{
		BPath path(&ref);
		fDownloadFolderControl->SetText(path.Path());
	}
}


// Enables Apply/Revert only when there are pending changes and greys out the
// proxy controls according to the proxy checkboxes.
void
SettingsWindow::_ValidateControlsEnabledStatus()
{
	bool
canApply = _CanApplySettings();
	fApplyButton->SetEnabled(canApply);
	fRevertButton->SetEnabled(canApply);
	// Let the Cancel button be enabled always, as another way to close the
	// window...
	fCancelButton->SetEnabled(true);

	bool useProxy = fUseProxyCheckBox->Value() == B_CONTROL_ON;
	fProxyAddressControl->SetEnabled(useProxy);
	fProxyPortControl->SetEnabled(useProxy);
	fUseProxyAuthCheckBox->SetEnabled(useProxy);
	bool useProxyAuth = useProxy
		&& fUseProxyAuthCheckBox->Value() == B_CONTROL_ON;
	fProxyUsernameControl->SetEnabled(useProxyAuth);
	fProxyPasswordControl->SetEnabled(useProxyAuth);
}


// #pragma mark -


// Maps the marked start-up menu item to its policy value.
uint32
SettingsWindow::_StartUpPolicy() const
{
	uint32 startUpPolicy = ResumePriorSession;
	BMenuItem* markedItem = fStartUpBehaviorMenu->Menu()->FindMarked();
	if (markedItem == fStartUpBehaviorStartNewSession)
		startUpPolicy = StartNewSession;
	return startUpPolicy;
}


// Maps the marked new-window menu item to its policy value.
uint32
SettingsWindow::_NewWindowPolicy() const
{
	uint32 newWindowPolicy = OpenStartPage;
	BMenuItem* markedItem = fNewWindowBehaviorMenu->Menu()->FindMarked();
	if (markedItem == fNewWindowBehaviorOpenSearchItem)
		newWindowPolicy = OpenSearchPage;
	else if (markedItem == fNewWindowBehaviorOpenBlankItem)
		newWindowPolicy = OpenBlankPage;
	return newWindowPolicy;
}


// Maps the marked new-tab menu item to its policy value.
uint32
SettingsWindow::_NewTabPolicy() const
{
	uint32 newTabPolicy = OpenBlankPage;
	BMenuItem* markedItem = fNewTabBehaviorMenu->Menu()->FindMarked();
	if (markedItem == fNewTabBehaviorCloneCurrentItem)
		newTabPolicy = CloneCurrentPage;
	else if (markedItem == fNewTabBehaviorOpenHomeItem)
		newTabPolicy = OpenStartPage;
	else if (markedItem == fNewTabBehaviorOpenSearchItem)
		newTabPolicy = OpenSearchPage;
	return newTabPolicy;
}


BFont
SettingsWindow::_FindDefaultSerifFont() const
{
	// Default to the first "serif" font we find.
	BFont serifFont(*be_plain_font);
	font_family family;
	int32 familyCount = count_font_families();
	for (int32 i = 0; i < familyCount; i++) {
		if (get_font_family(i, &family) == B_OK) {
			BString familyString(family);
			// Skip sans-serif families whose name contains "serif" as well.
			if (familyString.IFindFirst("sans") >= 0)
				continue;
			if (familyString.IFindFirst("serif") >= 0) {
				serifFont.SetFamilyAndFace(family, B_REGULAR_FACE);
				break;
			}
		}
	}
	return serifFont;
}


// Parses the proxy port text field as an unsigned integer.
uint32
SettingsWindow::_ProxyPort() const
{
	return atoul(fProxyPortControl->Text());
}
/** * @copyright * Copyright (c) 2017 - SLD Group @ Columbia University. All Rights Reserved. * * This file is part of Mnemosyne. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
 *
 * @file binary_value.hpp
 * @author Christian Pilato <christian.pilato@polimi.it>
 *
 * @brief Class to represent binary values
 *
 */
#ifndef _BINARY_VALUE_
#define _BINARY_VALUE_

#include "rtl_node.hpp"

FORWARD_DECL(BinaryValue);

/// RTL node holding a literal value expressed in binary, hexadecimal or
/// decimal notation together with its bit width.
class BinaryValue : public RtlNode
{
 public:
   /// Notation the stored string is written in.
   typedef enum { BIN, HEX, DEC } type_t;

 protected:
   type_t type;                 ///< notation of string_value
   unsigned int num_bits;       ///< bit width of the value
   std::string string_value;    ///< textual representation of the value

 public:
   BinaryValue();

   virtual ~BinaryValue();

   /// Creates a value from a raw string (notation/size handling is in the
   /// implementation).
   static BinaryValuePtr create(const std::string& string_value);

   /// Creates a value with an explicit notation and bit width.
   static BinaryValuePtr create(type_t type, unsigned int size,
      const std::string& string_value);

   unsigned int get_num_bits() const;

   const type_t get_type() const { return type; }

   const std::string get_string_value() const { return string_value; }

   virtual const std::string get_node_name() const { return "BinaryValue"; }

   virtual void show() const;
};

#endif
#include <gtest/gtest.h>

#include "device_server.h"
#include "nidaqmx/nidaqmx_service.h"

using namespace nidaqmx_grpc;

namespace ni {
namespace tests {
namespace system {

// System tests for NI-DAQmx task lifetime (create/clear) exercised through
// the in-process gRPC service.
class NiDAQmxSessionTests : public ::testing::Test {
 protected:
  NiDAQmxSessionTests()
      : device_server_(DeviceServerInterface::Singleton()),
        nidaqmx_stub_(NiDAQmx::NewStub(device_server_->InProcessChannel()))
  {
  }
  virtual ~NiDAQmxSessionTests() {}

  // Issues a CreateTask RPC with the given session name. The response
  // carries the new task's id and the driver status code.
  ::grpc::Status create_task(const std::string& name, CreateTaskResponse& response)
  {
    ::grpc::ClientContext context;
    CreateTaskRequest request;
    request.set_session_name(name);
    return stub()->CreateTask(&context, request, &response);
  }

  // Issues a ClearTask RPC. The task may be addressed by name, by id, or
  // both; empty name / zero id are simply omitted from the request.
  ::grpc::Status clear_task(const std::string& name, uint32_t session_id, ClearTaskResponse& response)
  {
    ::grpc::ClientContext context;
    ClearTaskRequest request;
    if (!name.empty()) {
      request.mutable_task()->set_name(name);
    }
    if (session_id) {
      request.mutable_task()->set_id(session_id);
    }
    return stub()->ClearTask(&context, request, &response);
  }

  std::unique_ptr<NiDAQmx::Stub>& stub() { return nidaqmx_stub_; }

  DeviceServerInterface* device_server_;  // not owned; process-wide singleton
  std::unique_ptr<NiDAQmx::Stub> nidaqmx_stub_;
};

// Creating an (unnamed) task and clearing it by the returned id should both
// succeed, and the returned task id should be non-zero.
TEST_F(NiDAQmxSessionTests, CreateTask_ClearTask_Succeeds)
{
  CreateTaskResponse create_response;
  auto create_status = create_task("", create_response);
  ClearTaskResponse clear_response;
  auto clear_status = clear_task("", create_response.task().id(), clear_response);

  EXPECT_TRUE(create_status.ok());
  EXPECT_TRUE(clear_status.ok());
  EXPECT_EQ(DAQmxSuccess, create_response.status());
  EXPECT_EQ(DAQmxSuccess, clear_response.status());
  EXPECT_NE(0, create_response.task().id());
}

}  // namespace system
}  // namespace tests
}  // namespace ni
/*
 *  Copyright (c) Facebook, Inc. and its affiliates.
 */

#include "h265_bitstream_parser.h"

#include <stdio.h>

#include <cstdint>
#include <memory>
#include <vector>

#include "h265_bitstream_parser_state.h"
#include "h265_common.h"
#include "h265_nal_unit_parser.h"

namespace {
// The size of a full NALU start sequence {0 0 0 1}, used for the first NALU
// of an access unit, and for SPS and PPS blocks.
// const size_t kNaluLongStartSequenceSize = 4;

// The size of a shortened NALU start sequence {0 0 1}, that may be used if
// not the first NALU of an access unit or an SPS or PPS block.
const size_t kNaluShortStartSequenceSize = 3;
}  // namespace

namespace h265nal {

// General note: this is based off the 2016/12 version of the H.265 standard.
// You can find it on this page:
// http://www.itu.int/rec/T-REC-H.265

// Scans `data` for Annex B start codes (00 00 01 / 00 00 00 01) and returns
// one NaluIndex per NAL unit found, with start offset, payload offset, and
// payload size filled in.
std::vector<H265BitstreamParser::NaluIndex>
H265BitstreamParser::FindNaluIndices(const uint8_t* data,
                                     size_t length) noexcept {
  // This is sorta like Boyer-Moore, but with only the first optimization step:
  // given a 3-byte sequence we're looking at, if the 3rd byte isn't 1 or 0,
  // skip ahead to the next 3-byte sequence. 0s and 1s are relatively rare, so
  // this will skip the majority of reads/checks.
  std::vector<NaluIndex> sequences;
  if (length < kNaluShortStartSequenceSize) {
    return sequences;
  }

  const size_t end = length - kNaluShortStartSequenceSize;
  for (size_t i = 0; i < end;) {
    if (data[i + 2] > 1) {
      i += 3;
    } else if (data[i + 2] == 0x01 && data[i + 1] == 0x00 && data[i] == 0x00) {
      // We found a start sequence, now check if it was a 3- or 4-byte one.
      NaluIndex index = {i, i + 3, 0};
      if (index.start_offset > 0 && data[index.start_offset - 1] == 0)
        --index.start_offset;

      // Update length of previous entry.
      auto it = sequences.rbegin();
      if (it != sequences.rend())
        it->payload_size = index.start_offset - it->payload_start_offset;

      sequences.push_back(index);

      i += 3;
    } else {
      ++i;
    }
  }

  // Update length of last entry, if any (it runs to the end of the buffer).
  auto it = sequences.rbegin();
  if (it != sequences.rend())
    it->payload_size = length - it->payload_start_offset;

  return sequences;
}

// Parse a raw (RBSP) buffer with explicit NAL unit separator (3- or 4-byte
// sequence start code prefix). Function splits the stream in NAL units,
// and then parses each NAL unit. For that, it unpacks the RBSP inside
// each NAL unit buffer, and adds the corresponding parsed struct into
// the `bitstream` list (a `BitstreamState` object).
// Function returns the said `bitstream` list.
std::unique_ptr<H265BitstreamParser::BitstreamState>
H265BitstreamParser::ParseBitstream(
    const uint8_t* data, size_t length,
    H265BitstreamParserState* bitstream_parser_state,
    bool add_checksum) noexcept {
  auto bitstream = std::make_unique<BitstreamState>();

  // (1) split the input string into a vector of NAL units
  std::vector<NaluIndex> nalu_indices = FindNaluIndices(data, length);

  // process each of the NAL units
  for (const NaluIndex& nalu_index : nalu_indices) {
    // (2) parse the NAL units, and add them to the vector
    auto nal_unit = H265NalUnitParser::ParseNalUnit(
        &data[nalu_index.payload_start_offset], nalu_index.payload_size,
        bitstream_parser_state, add_checksum);
    if (nal_unit == nullptr) {
      // cannot parse the NalUnit: skip it, but keep parsing the rest
#ifdef FPRINT_ERRORS
      fprintf(stderr, "error: cannot parse buffer into NalUnit\n");
#endif  // FPRINT_ERRORS
      continue;
    }

    // store the offset/length so dumpers can locate the NALU in the input
    nal_unit->offset = nalu_index.payload_start_offset;
    nal_unit->length = nalu_index.payload_size;

    bitstream->nal_units.push_back(std::move(nal_unit));
  }

  return bitstream;
}

// Convenience overload: owns a fresh H265BitstreamParserState internally and
// records the add_* output flags on the returned BitstreamState.
std::unique_ptr<H265BitstreamParser::BitstreamState>
H265BitstreamParser::ParseBitstream(const uint8_t* data, size_t length,
                                    bool add_offset, bool add_length,
                                    bool add_parsed_length,
                                    bool add_checksum) noexcept {
  // keep a bitstream parser state (to keep the VPS/PPS/SPS NALUs)
  H265BitstreamParserState bitstream_parser_state;

  // create bitstream parser state
  auto bitstream = std::make_unique<BitstreamState>();

  // parse the file
  bitstream = ParseBitstream(data, length, &bitstream_parser_state, add_checksum);
  if (bitstream == nullptr) {
    // did not work
#ifdef FPRINT_ERRORS
    fprintf(stderr, "Could not init h265 bitstream parser\n");
#endif  // FPRINT_ERRORS
    return nullptr;
  }
  bitstream->add_offset = add_offset;
  bitstream->add_length = add_length;
  bitstream->add_parsed_length = add_parsed_length;
  bitstream->add_checksum = add_checksum;

  return bitstream;
}

#ifdef FDUMP_DEFINE
// Dumps every parsed NAL unit to `outfp`, honoring the add_* flags recorded
// at parse time.
void H265BitstreamParser::BitstreamState::fdump(FILE* outfp,
                                                int indent_level) const {
  for (auto& nal_unit : nal_units) {
    nal_unit->fdump(outfp, indent_level, add_offset, add_length,
                    add_parsed_length, add_checksum);
    fprintf(outfp, "\n");
  }
}
#endif  // FDUMP_DEFINE

}  // namespace h265nal
/*************************************************************************
 * libjson-rpc-cpp
 *************************************************************************
 * @file xbmcremote.cpp
 * @date 03.06.2013
 * @author Peter Spiess-Knafl <dev@spiessknafl.at>
 * @license See attached LICENSE.txt
 ************************************************************************/

#include "gen/xbmcremote.h"
#include <jsonrpccpp/client/connectors/httpclient.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#ifndef WIN32
#include <termios.h>
#else
#include <conio.h>
#endif
#include <time.h>
#include <unistd.h>
#include <iostream>

using namespace jsonrpc;
using namespace std;

// Blocks until a single key is pressed and returns it, without waiting for
// Enter and without echoing the character.
// Taken from:
// http://stackoverflow.com/questions/2984307/c-key-pressed-in-linux-console
int kbhit() {
  int ch;
#ifndef WIN32
  // Temporarily switch the terminal to non-canonical, no-echo mode so a
  // single character can be read, then restore the saved settings.
  struct termios neu, alt;
  int fd = fileno(stdin);
  tcgetattr(fd, &alt);
  neu = alt;
  neu.c_lflag &= ~(ICANON | ECHO);
  tcsetattr(fd, TCSANOW, &neu);
  ch = getchar();
  tcsetattr(fd, TCSANOW, &alt);
#else
  while (!_kbhit()) {
    usleep(100000);
  }
  ch = _getch();
#endif
  return ch;
}

// Interactive XBMC remote: maps a/s/d/w/Enter/Esc keypresses to JSON-RPC
// Input_* calls against the API URL given as the first argument.
int main(int argc, char **argv) {
  if (argc < 2) {
    cerr << "Provide XBMC API URL as argument! e.g.: " << argv[0]
         << " http://127.0.0.1:8080/jsonrpc" << endl;
    return -1;
  } else {
    cout << "XBMC Remote control" << endl;
    cout << "\ta -> left" << endl;
    cout << "\td -> right" << endl;
    cout << "\tw -> up" << endl;
    // Fixed help text: 'down' is bound to the 's' key (code 115 in the
    // switch below); the original message incorrectly printed 'd' twice.
    cout << "\ts -> down" << endl;
    cout << "\tEsc -> back" << endl;
    cout << "\tEnter -> select" << endl;
    cout << "\tx -> exit application" << endl;
    try {
      HttpClient httpclient(argv[1]);
      XbmcRemoteClient stub(httpclient);
      bool run = true;
      while (run) {
        int key = kbhit();
        switch (key) {
        case 97:  // 'a'
          stub.Input_Left();
          break;
        case 115:  // 's'
          stub.Input_Down();
          break;
        case 100:  // 'd'
          stub.Input_Right();
          break;
        case 119:  // 'w'
          stub.Input_Up();
          break;
        case 10:  // LF (Enter)
        case 13:  // CR (Enter)
          stub.Input_Select();
          break;
        case 127:  // DEL / Backspace
        case 27:   // Esc
          stub.Input_Back();
          break;
        case 120:  // 'x'
          run = false;
          break;
        }
      }
    } catch (JsonRpcException &e) {
      cerr << e.what() << endl;
    }
  }
  return 0;
}
/*
    SPDX-FileCopyrightText: 2010-2016 Sune Vuorela <sune@vuorela.dk>

    SPDX-License-Identifier: MIT
*/

#include "prison.h"
#include "barcodeexamplewidget.h"
// Prison
#include <prison/abstractbarcode.h>
#include <prison/prison.h>
// Qt
#include <QDebug>
#include <QHBoxLayout>
#include <QLineEdit>
#include <QPushButton>
#include <QSplitter>

// Pushes the current line-edit text into every example widget so all
// barcode renderings stay in sync with the input field.
void main_window::data_changed()
{
    QString result = m_lineedit->text();
    m_dmw->setData(result);
    m_qrw->setData(result);
    m_39w->setData(result);
    m_93w->setData(result);
    m_nullw->setData(result);
    m_dmcolor->setData(result);
    m_qrcolor->setData(result);
    m_39color->setData(result);
    m_93color->setData(result);
}

// Builds the demo UI: a text input plus a vertical splitter holding one
// example widget per barcode type (default colors, custom colors, and a
// null barcode). createBarcode() may return nullptr for unsupported
// types; BarcodeExampleWidget then renders a black square.
main_window::main_window()
{
    QHBoxLayout *lay = new QHBoxLayout();
    m_lineedit = new QLineEdit(this);
    QPushButton *but = new QPushButton(this);
    // The button re-applies the text; note data_changed() reads the line
    // edit directly, so the clicked() signal needs no argument.
    connect(but, &QPushButton::clicked, this, &main_window::data_changed);
    lay->addWidget(m_lineedit);
    lay->addWidget(but);
    QVBoxLayout *mainlay = new QVBoxLayout(this);
    // Default-colored barcodes, one scope per widget.
    {
        Prison::AbstractBarcode *barcode = Prison::createBarcode(Prison::DataMatrix);
        if (!barcode) {
            qDebug() << "unsupported barcode, showing a black square";
        }
        m_dmw = new BarcodeExampleWidget(barcode, this);
    }
    {
        Prison::AbstractBarcode *barcode = Prison::createBarcode(Prison::QRCode);
        if (!barcode) {
            qDebug() << "unsupported barcode, showing a black square";
        }
        m_qrw = new BarcodeExampleWidget(barcode, this);
    }
    {
        Prison::AbstractBarcode *barcode = Prison::createBarcode(Prison::Code39);
        if (!barcode) {
            qDebug() << "unsupported barcode, showing a black square";
        }
        m_39w = new BarcodeExampleWidget(barcode, this);
    }
    {
        Prison::AbstractBarcode *barcode = Prison::createBarcode(Prison::Code93);
        if (!barcode) {
            qDebug() << "unsupported barcode, showing a black square";
        }
        m_93w = new BarcodeExampleWidget(barcode, this);
    }
    // Same four barcode types again, with custom fore/background colors.
    {
        Prison::AbstractBarcode *dmcolorcode = Prison::createBarcode(Prison::DataMatrix);
        if (dmcolorcode) {
            dmcolorcode->setForegroundColor(Qt::red);
            dmcolorcode->setBackgroundColor(Qt::darkBlue);
        } else {
            qDebug() << "unsupported barcode, showing a black square";
        }
        m_dmcolor = new BarcodeExampleWidget(dmcolorcode, this);
    }
    {
        Prison::AbstractBarcode *qrcolorcode = Prison::createBarcode(Prison::QRCode);
        if (qrcolorcode) {
            qrcolorcode->setForegroundColor(Qt::red);
            qrcolorcode->setBackgroundColor(Qt::darkBlue);
        } else {
            qDebug() << "unsupported barcode, showing a black square";
        }
        m_qrcolor = new BarcodeExampleWidget(qrcolorcode, this);
    }
    {
        Prison::AbstractBarcode *c39colorcode = Prison::createBarcode(Prison::Code39);
        if (c39colorcode) {
            c39colorcode->setForegroundColor(Qt::red);
            c39colorcode->setBackgroundColor(Qt::darkBlue);
        } else {
            qDebug() << "unsupported barcode, showing a black square";
        }
        m_39color = new BarcodeExampleWidget(c39colorcode, this);
    }
    {
        Prison::AbstractBarcode *c93colorcode = Prison::createBarcode(Prison::Code93);
        if (c93colorcode) {
            c93colorcode->setForegroundColor(Qt::red);
            c93colorcode->setBackgroundColor(Qt::darkBlue);
        } else {
            qDebug() << "unsupported barcode, showing a black square";
        }
        m_93color = new BarcodeExampleWidget(c93colorcode, this);
    }
    // Deliberately null barcode to exercise the fallback rendering path.
    m_nullw = new BarcodeExampleWidget(nullptr, this);
    QSplitter *splitter = new QSplitter(Qt::Vertical);
    splitter->addWidget(m_dmw);
    splitter->addWidget(m_qrw);
    splitter->addWidget(m_39w);
    splitter->addWidget(m_93w);
    splitter->addWidget(m_dmcolor);
    splitter->addWidget(m_qrcolor);
    splitter->addWidget(m_39color);
    splitter->addWidget(m_93color);
    splitter->addWidget(m_nullw);
    mainlay->addLayout(lay);
    mainlay->addWidget(splitter);
    // Seed the widgets with an initial value so something renders at startup.
    m_lineedit->setText(QStringLiteral("AOEUIAOEUIAOEUI"));
    data_changed();
}
#include "scene_conchoid.h"
#include "vec2.h"
#include <vector>

// Called once when the application starts.
// Builds the two branches of a conchoid curve (points q1 = p - d and
// q2 = p + d for each sample p on the x axis, where d is the unit vector
// toward the fixed point o scaled by k) and uploads each branch into its
// own VAO/VBO pair.
void scene_conchoid::init()
{
	std::vector<cgmath::vec2> upLine;
	std::vector<cgmath::vec2> downLine;
	// k is the conchoid offset distance; o is the fixed pole of the curve.
	float k = 0.75;
	cgmath::vec2 o = cgmath::vec2(0, -0.5);
	// Sample x in [-2, 2) in steps of 0.1 (float accumulation, so the
	// exact endpoint is approximate).
	for (float x = -2; x < 2; x += 0.1) {
		cgmath::vec2 p(x, 0);
		// std::cout << p << " este es p\n";
		cgmath::vec2 vecRest = (o - p);
		vecRest.normalize();
		cgmath::vec2 d = vecRest * k;
		cgmath::vec2 q1 = p - d;
		cgmath::vec2 q2 = p + d;
		//std::cout << q1 << " este es q1\n";
		std::cout << q2 << " este es q2\n";
		upLine.push_back(q1);
		downLine.push_back(q2);
	}
	// Both branches have the same sample count, so one size suffices for
	// both draw calls.
	sizeUpLine = upLine.size();
	// Create an identifier for a Vertex Array Object; store the id in vao.
	glGenVertexArrays(1, &vao);
	// Start working with this vao.
	glBindVertexArray(vao);
	// Create an identifier for a Vertex Buffer Object;
	// store the id in positionsVBO.
	glGenBuffers(1, &positionsVBO);
	// Work with the positionsVBO buffer.
	glBindBuffer(GL_ARRAY_BUFFER, positionsVBO);
	// Allocate the buffer memory, specify the data, and send it to the GPU.
	glBufferData(GL_ARRAY_BUFFER,
		sizeof(cgmath::vec2) * upLine.size(),
		upLine.data(),
		GL_DYNAMIC_DRAW);
	// Enable attribute 0.
	glEnableVertexAttribArray(0);
	// Configure attribute 0:
	// number of components,
	// data type of each component,
	// whether to normalize the data,
	// stride between attributes in the list,
	// pointer to the data if not already uploaded.
	glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 0, nullptr);
	// Binding 0 means unbind.
	// Unbind positionsVBO.
	glBindBuffer(GL_ARRAY_BUFFER, 0);
	// Unbind the vao.
	glBindVertexArray(0);

	// *****************************************************
	// Second branch: same setup for vao2/positionsVBO2 but with the
	// downLine samples (upLine.size() == downLine.size() by construction,
	// so the byte count is the same).
	glGenVertexArrays(1, &vao2);
	glBindVertexArray(vao2);
	glGenBuffers(1, &positionsVBO2);
	glBindBuffer(GL_ARRAY_BUFFER, positionsVBO2);
	glBufferData(GL_ARRAY_BUFFER,
		sizeof(cgmath::vec2) * upLine.size(),
		downLine.data(),
		GL_DYNAMIC_DRAW);
	glEnableVertexAttribArray(0);
	// Same attribute-0 layout as the first VAO.
	glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 0, nullptr);
	glBindBuffer(GL_ARRAY_BUFFER, 0);
	glBindVertexArray(0);
	// *****************************************************

	primitiveType = GL_LINE_STRIP;
}

// Scene activated: black clear color, large points.
void scene_conchoid::awake()
{
	glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
	glPointSize(20.0f);
}

// Scene deactivated: restore the default point size.
void scene_conchoid::sleep()
{
	glPointSize(1.0f);
}

// Per-frame draw: clear, then render each branch from its own VAO.
void scene_conchoid::mainLoop()
{
	glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

	// Bind the vao that holds all the attributes.
	glBindVertexArray(vao);
	// Draw call:
	// primitive type,
	// starting vertex and number of vertices.
	glDrawArrays(primitiveType, 0, sizeUpLine);
	// Unbind the vao and all its attributes.
	glBindVertexArray(0);

	// **************************************
	// Second branch (same vertex count as the first).
	glBindVertexArray(vao2);
	glDrawArrays(primitiveType, 0, sizeUpLine);
	glBindVertexArray(0);
	// **************************************
}

// Key handler; only '1' is active (all bindings select GL_LINE_STRIP).
void scene_conchoid::normalKeysDown(unsigned char key)
{
	/*
	if (key == '3') {
		primitiveType = GL_LINE_STRIP;
	}
	if (key == '2') {
		primitiveType = GL_LINE_STRIP;
	}
	*/
	if (key == '1') {
		primitiveType = GL_LINE_STRIP;
	}
}

void scene_conchoid::normalKeysUp(unsigned char key)
{
}

void scene_conchoid::resize(int width, int height)
{
}
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "chromeos/network/device_state.h"

#include "base/logging.h"
#include "base/metrics/histogram.h"
#include "base/strings/stringprintf.h"
#include "base/values.h"
#include "third_party/cros_system_api/dbus/service_constants.h"

namespace chromeos {

DeviceState::DeviceState(const std::string& path)
    : ManagedState(MANAGED_TYPE_DEVICE, path),
      provider_requires_roaming_(false),
      support_network_scan_(false),
      scanning_(false),
      sim_lock_enabled_(false),
      sim_present_(true) {
}

DeviceState::~DeviceState() {
}

// Applies a single shill/flimflam property update to this device. The raw
// value is always cached in |properties_|; the return value is true only
// when one of the typed member fields was actually updated.
bool DeviceState::PropertyChanged(const std::string& key,
                                  const base::Value& value) {
  // All property values get stored in |properties_|.
  properties_.SetWithoutPathExpansion(key, value.DeepCopy());

  if (ManagedStatePropertyChanged(key, value))
    return true;
  if (key == flimflam::kAddressProperty) {
    return GetStringValue(key, value, &mac_address_);
  } else if (key == flimflam::kScanningProperty) {
    return GetBooleanValue(key, value, &scanning_);
  } else if (key == flimflam::kSupportNetworkScanProperty) {
    return GetBooleanValue(key, value, &support_network_scan_);
  } else if (key == shill::kProviderRequiresRoamingProperty) {
    return GetBooleanValue(key, value, &provider_requires_roaming_);
  } else if (key == flimflam::kHomeProviderProperty) {
    // The home provider arrives as a dictionary; build a display id of the
    // form "name (country)", falling back to the operator code.
    const base::DictionaryValue* dict = NULL;
    if (!value.GetAsDictionary(&dict))
      return false;
    std::string home_provider_country;
    std::string home_provider_name;
    dict->GetStringWithoutPathExpansion(flimflam::kOperatorCountryKey,
                                        &home_provider_country);
    dict->GetStringWithoutPathExpansion(flimflam::kOperatorNameKey,
                                        &home_provider_name);
    // Set home_provider_id_
    if (!home_provider_name.empty() && !home_provider_country.empty()) {
      home_provider_id_ = base::StringPrintf(
          "%s (%s)",
          home_provider_name.c_str(),
          home_provider_country.c_str());
    } else {
      dict->GetStringWithoutPathExpansion(flimflam::kOperatorCodeKey,
                                          &home_provider_id_);
      LOG(WARNING) << "Carrier ID not defined, using code instead: "
                   << home_provider_id_;
    }
    return true;
  } else if (key == flimflam::kTechnologyFamilyProperty) {
    return GetStringValue(key, value, &technology_family_);
  } else if (key == flimflam::kCarrierProperty) {
    return GetStringValue(key, value, &carrier_);
  } else if (key == flimflam::kFoundNetworksProperty) {
    // Cellular scan results arrive as a list; replace the stored results
    // only if the whole list parses.
    const base::ListValue* list = NULL;
    if (!value.GetAsList(&list))
      return false;
    CellularScanResults parsed_results;
    if (!network_util::ParseCellularScanResults(*list, &parsed_results))
      return false;
    scan_results_.swap(parsed_results);
    return true;
  } else if (key == flimflam::kSIMLockStatusProperty) {
    const base::DictionaryValue* dict = NULL;
    if (!value.GetAsDictionary(&dict))
      return false;

    // Return true if at least one of the property values changed.
    bool property_changed = false;
    const base::Value* out_value = NULL;
    if (!dict->GetWithoutPathExpansion(flimflam::kSIMLockRetriesLeftProperty,
                                       &out_value))
      return false;
    if (GetUInt32Value(flimflam::kSIMLockRetriesLeftProperty,
                       *out_value, &sim_retries_left_))
      property_changed = true;
    if (!dict->GetWithoutPathExpansion(flimflam::kSIMLockTypeProperty,
                                       &out_value))
      return false;
    if (GetStringValue(flimflam::kSIMLockTypeProperty,
                       *out_value, &sim_lock_type_))
      property_changed = true;
    if (!dict->GetWithoutPathExpansion(flimflam::kSIMLockEnabledProperty,
                                       &out_value))
      return false;
    if (GetBooleanValue(flimflam::kSIMLockEnabledProperty,
                        *out_value, &sim_lock_enabled_))
      property_changed = true;
    return property_changed;
  } else if (key == flimflam::kMeidProperty) {
    return GetStringValue(key, value, &meid_);
  } else if (key == flimflam::kImeiProperty) {
    return GetStringValue(key, value, &imei_);
  } else if (key == flimflam::kIccidProperty) {
    return GetStringValue(key, value, &iccid_);
  } else if (key == flimflam::kMdnProperty) {
    return GetStringValue(key, value, &mdn_);
  } else if (key == shill::kSIMPresentProperty) {
    return GetBooleanValue(key, value, &sim_present_);
  }
  return false;
}

// Called once after the initial property dump. Only records UMA stats;
// always returns false (no members changed here beyond PropertyChanged).
bool DeviceState::InitialPropertiesReceived(
    const base::DictionaryValue& properties) {
  // Update UMA stats.
  if (sim_present_) {
    bool locked = !sim_lock_type_.empty();
    UMA_HISTOGRAM_BOOLEAN("Cellular.SIMLocked", locked);
  }
  return false;
}

// A SIM can only be "absent" on GSM-family devices; CDMA devices have no
// removable SIM concept here.
bool DeviceState::IsSimAbsent() const {
  return technology_family_ == flimflam::kTechnologyFamilyGsm &&
      !sim_present_;
}

}  // namespace chromeos
// Generated from /POI/java/org/apache/poi/hssf/record/FormulaRecord.java
// (machine-translated from Java; manual edits should be limited to comments)

#include <org/apache/poi/hssf/record/FormulaRecord_SpecialCachedValue.hpp>
#include <java/lang/Class.hpp>
#include <java/lang/IllegalStateException.hpp>
#include <java/lang/NullPointerException.hpp>
#include <java/lang/String.hpp>
#include <java/lang/StringBuilder.hpp>
#include <org/apache/poi/ss/formula/eval/ErrorEval.hpp>
#include <org/apache/poi/ss/usermodel/CellType.hpp>
#include <org/apache/poi/util/HexDump.hpp>
#include <org/apache/poi/util/LittleEndianOutput.hpp>
#include <org/apache/poi/util/RecordFormatException.hpp>
#include <Array.hpp>

// Null-pointer check helper emitted by the transpiler: throws a Java-style
// NullPointerException instead of dereferencing null.
template<typename T>
static T* npc(T* t)
{
    if(!t) throw new ::java::lang::NullPointerException();
    return t;
}

poi::hssf::record::FormulaRecord_SpecialCachedValue::FormulaRecord_SpecialCachedValue(const ::default_init_tag&)
    : super(*static_cast< ::default_init_tag* >(0))
{
    clinit();
}

poi::hssf::record::FormulaRecord_SpecialCachedValue::FormulaRecord_SpecialCachedValue(::int8_tArray* data)
    : FormulaRecord_SpecialCachedValue(*static_cast< ::default_init_tag* >(0))
{
    ctor(data);
}

// Out-of-class definitions for the in-class constexpr constants.
constexpr int64_t poi::hssf::record::FormulaRecord_SpecialCachedValue::BIT_MARKER;
constexpr int32_t poi::hssf::record::FormulaRecord_SpecialCachedValue::VARIABLE_DATA_LENGTH;
constexpr int32_t poi::hssf::record::FormulaRecord_SpecialCachedValue::DATA_INDEX;
constexpr int32_t poi::hssf::record::FormulaRecord_SpecialCachedValue::STRING;
constexpr int32_t poi::hssf::record::FormulaRecord_SpecialCachedValue::BOOLEAN;
constexpr int32_t poi::hssf::record::FormulaRecord_SpecialCachedValue::ERROR_CODE;
constexpr int32_t poi::hssf::record::FormulaRecord_SpecialCachedValue::EMPTY;

void poi::hssf::record::FormulaRecord_SpecialCachedValue::ctor(::int8_tArray* data)
{
    super::ctor();
    _variableData = data;
}

// The first byte of the cached-value blob encodes its type
// (STRING/BOOLEAN/ERROR_CODE/EMPTY).
int32_t poi::hssf::record::FormulaRecord_SpecialCachedValue::getTypeCode()
{
    return (*_variableData)[int32_t(0)];
}

// Decodes the raw 64-bit cached formula result. Returns nullptr when the
// bits are an ordinary IEEE double (the special-value marker bits are not
// all set); throws RecordFormatException on an unknown type code.
poi::hssf::record::FormulaRecord_SpecialCachedValue* poi::hssf::record::FormulaRecord_SpecialCachedValue::create(int64_t valueLongBits)
{
    clinit();
    if((BIT_MARKER & valueLongBits) != BIT_MARKER) {
        return nullptr;
    }
    auto result = new ::int8_tArray(VARIABLE_DATA_LENGTH);
    auto x = valueLongBits;
    // Unpack the long little-endian into the data array.
    for (auto i = int32_t(0); i < VARIABLE_DATA_LENGTH; i++) {
        (*result)[i] = static_cast< int8_t >(x);
        x >>= 8;
    }
    switch ((*result)[int32_t(0)]) {
    case STRING:
    case BOOLEAN:
    case ERROR_CODE:
    case EMPTY:
        break;
    default:
        throw new ::poi::util::RecordFormatException(::java::lang::StringBuilder().append(u"Bad special value code ("_j)->append((*result)[int32_t(0)])
            ->append(u")"_j)->toString());
    }
    return new FormulaRecord_SpecialCachedValue(result);
}

// Writes the 6 data bytes followed by the 0xFFFF marker short.
void poi::hssf::record::FormulaRecord_SpecialCachedValue::serialize(::poi::util::LittleEndianOutput* out)
{
    npc(out)->write(_variableData);
    npc(out)->writeShort(65535);
}

java::lang::String* poi::hssf::record::FormulaRecord_SpecialCachedValue::formatDebugString()
{
    return ::java::lang::StringBuilder().append(formatValue())->append(u' ')
        ->append(::poi::util::HexDump::toHex(_variableData))->toString();
}

// Human-readable rendering of the cached value, by type code.
java::lang::String* poi::hssf::record::FormulaRecord_SpecialCachedValue::formatValue()
{
    auto typeCode = getTypeCode();
    switch (typeCode) {
    case STRING:
        return u"<string>"_j;
    case BOOLEAN:
        return getDataValue() == 0 ? u"FALSE"_j : u"TRUE"_j;
    case ERROR_CODE:
        return ::poi::ss::formula::eval::ErrorEval::getText(getDataValue());
    case EMPTY:
        return u"<empty>"_j;
    }
    return ::java::lang::StringBuilder().append(u"#error(type="_j)->append(typeCode)
        ->append(u")#"_j)->toString();
}

// Payload byte (boolean flag or error code) at DATA_INDEX.
int32_t poi::hssf::record::FormulaRecord_SpecialCachedValue::getDataValue()
{
    return (*_variableData)[DATA_INDEX];
}

poi::hssf::record::FormulaRecord_SpecialCachedValue* poi::hssf::record::FormulaRecord_SpecialCachedValue::createCachedEmptyValue()
{
    clinit();
    return create(EMPTY, 0);
}

poi::hssf::record::FormulaRecord_SpecialCachedValue* poi::hssf::record::FormulaRecord_SpecialCachedValue::createForString()
{
    clinit();
    return create(STRING, 0);
}

poi::hssf::record::FormulaRecord_SpecialCachedValue* poi::hssf::record::FormulaRecord_SpecialCachedValue::createCachedBoolean(bool b)
{
    clinit();
    return create(BOOLEAN, b ? int32_t(1) : int32_t(0));
}

poi::hssf::record::FormulaRecord_SpecialCachedValue* poi::hssf::record::FormulaRecord_SpecialCachedValue::createCachedErrorCode(int32_t errorCode)
{
    clinit();
    return create(ERROR_CODE, errorCode);
}

// Builds the 6-byte blob: [type, 0, data, 0, 0, 0].
poi::hssf::record::FormulaRecord_SpecialCachedValue* poi::hssf::record::FormulaRecord_SpecialCachedValue::create(int32_t code, int32_t data)
{
    clinit();
    auto vd = (new ::int8_tArray({
        static_cast< int8_t >(code)
        , static_cast< int8_t >(int32_t(0))
        , static_cast< int8_t >(data)
        , static_cast< int8_t >(int32_t(0))
        , static_cast< int8_t >(int32_t(0))
        , static_cast< int8_t >(int32_t(0))
    }));
    return new FormulaRecord_SpecialCachedValue(vd);
}

java::lang::String* poi::hssf::record::FormulaRecord_SpecialCachedValue::toString()
{
    return ::java::lang::StringBuilder().append(npc(getClass())->getName())->append(u'[')
        ->append(formatValue())
        ->append(u']')->toString();
}

// Maps the internal type code to the public CellType code (EMPTY is
// reported as STRING, matching the original POI behavior).
int32_t poi::hssf::record::FormulaRecord_SpecialCachedValue::getValueType()
{
    auto typeCode = getTypeCode();
    switch (typeCode) {
    case STRING:
        return npc(::poi::ss::usermodel::CellType::STRING)->getCode();
    case BOOLEAN:
        return npc(::poi::ss::usermodel::CellType::BOOLEAN)->getCode();
    case ERROR_CODE:
        return npc(::poi::ss::usermodel::CellType::ERROR)->getCode();
    case EMPTY:
        return npc(::poi::ss::usermodel::CellType::STRING)->getCode();
    }
    throw new ::java::lang::IllegalStateException(::java::lang::StringBuilder().append(u"Unexpected type id ("_j)->append(typeCode)
        ->append(u")"_j)->toString());
}

// Accessor with a type guard: throws IllegalStateException if the cached
// value is not a boolean.
bool poi::hssf::record::FormulaRecord_SpecialCachedValue::getBooleanValue()
{
    if(getTypeCode() != BOOLEAN) {
        throw new ::java::lang::IllegalStateException(::java::lang::StringBuilder().append(u"Not a boolean cached value - "_j)->append(formatValue())->toString());
    }
    return getDataValue() != 0;
}

// Accessor with a type guard: throws IllegalStateException if the cached
// value is not an error code.
int32_t poi::hssf::record::FormulaRecord_SpecialCachedValue::getErrorValue()
{
    if(getTypeCode() != ERROR_CODE) {
        throw new ::java::lang::IllegalStateException(::java::lang::StringBuilder().append(u"Not an error cached value - "_j)->append(formatValue())->toString());
    }
    return getDataValue();
}

extern java::lang::Class *class_(const char16_t *c, int n);

java::lang::Class* poi::hssf::record::FormulaRecord_SpecialCachedValue::class_()
{
    static ::java::lang::Class* c = ::class_(u"org.apache.poi.hssf.record.FormulaRecord.SpecialCachedValue", 59);
    return c;
}

java::lang::Class* poi::hssf::record::FormulaRecord_SpecialCachedValue::getClass0()
{
    return class_();
}
#include <gtest/gtest.h>
#include <vector>
#include <string>
#include <Error.hpp>
#include <parser/ParserContext.hpp>
#include <parser/Name.hpp>
#include <Helper.hpp>

// Unit tests for the Name parser: Name consumes a quoted string literal
// (bytes may be written literally or as two-digit hex escapes like "\0a")
// and must reject byte sequences that are not well-formed UTF-8 by
// throwing Error<ErrorType::ParseError>.
//
// Fix over the previous version: the failure tests used
//   Name* result = nullptr;
//   EXPECT_THROW(result = new Name(context), ...);
//   delete result;
// which heap-allocates for no reason and leaks the Name whenever the
// constructor unexpectedly does NOT throw (result is assigned but the
// test continues).  Constructing a temporary inside EXPECT_THROW keeps
// the object on the stack, so nothing can leak either way.

TEST(unittest_Name, empty){
    std::vector<char> data(create_char_vector("\"\""));
    ParserContext context(data);
    Name result(context);
    // The whole literal must be consumed.
    EXPECT_EQ(context.cursor, data.end());
    EXPECT_TRUE(result.has_value());
    EXPECT_EQ(*result, "");
}

TEST(unittest_Name, regular){
    std::vector<char> data(create_char_vector("\"abcd\""));
    ParserContext context(data);
    Name result(context);
    EXPECT_EQ(context.cursor, data.end());
    EXPECT_TRUE(result.has_value());
    EXPECT_EQ(*result, "abcd");
}

TEST(unittest_Name, hexdigit){
    // "\0a" decodes to the single byte 0x0a.
    std::vector<char> data(create_char_vector("\"\\0a\""));
    ParserContext context(data);
    Name result(context);
    EXPECT_EQ(context.cursor, data.end());
    EXPECT_TRUE(result.has_value());
    EXPECT_EQ(*result, "\x0a");
}

TEST(unittest_Name, invalid_character_single){
    // 0x90 is a lone continuation byte: not valid UTF-8.
    std::vector<char> data(create_char_vector("\"\\90\""));
    ParserContext context(data);
    EXPECT_THROW(Name{context}, Error<ErrorType::ParseError>);
}

TEST(unittest_Name, character_double){
    // Valid two-byte sequence C1 A0.
    std::vector<char> data(create_char_vector("\"\\c1\\a0\""));
    ParserContext context(data);
    Name result(context);
    EXPECT_EQ(context.cursor, data.end());
    EXPECT_TRUE(result.has_value());
    EXPECT_EQ(*result, "\xc1\xa0");
}

TEST(unittest_Name, invalid_character_double){
    // 0xd1 is not a valid continuation byte after 0xc1.
    std::vector<char> data(create_char_vector("\"\\c1\\d1\""));
    ParserContext context(data);
    EXPECT_THROW(Name{context}, Error<ErrorType::ParseError>);
}

TEST(unittest_Name, invalid_character_double_big){
    // 0xe1 starts a three-byte sequence, so a two-byte tail is too short.
    std::vector<char> data(create_char_vector("\"\\e1\\a0\""));
    ParserContext context(data);
    EXPECT_THROW(Name{context}, Error<ErrorType::ParseError>);
}

TEST(unittest_Name, character_triple){
    // Valid three-byte sequence E1 82 83.
    std::vector<char> data(create_char_vector("\"\\e1\\82\\83\""));
    ParserContext context(data);
    Name result(context);
    EXPECT_EQ(context.cursor, data.end());
    EXPECT_TRUE(result.has_value());
    EXPECT_EQ(*result, "\xe1\x82\x83");
}

TEST(unittest_Name, invalid_character_triple){
    // 0xc2 is a sequence starter, not a continuation byte.
    std::vector<char> data(create_char_vector("\"\\e1\\93\\c2\""));
    ParserContext context(data);
    EXPECT_THROW(Name{context}, Error<ErrorType::ParseError>);
}

TEST(unittest_Name, invalid_character_triple_big){
    // 0xf1 starts a four-byte sequence; a three-byte tail is too short.
    std::vector<char> data(create_char_vector("\"\\f1\\84\\A3\""));
    ParserContext context(data);
    EXPECT_THROW(Name{context}, Error<ErrorType::ParseError>);
}

TEST(unittest_Name, invalid_character_triple_preserved_range){
    // ED A4 80 encodes a UTF-16 surrogate code point: forbidden in UTF-8.
    std::vector<char> data(create_char_vector("\"\\ed\\A4\\80\""));
    ParserContext context(data);
    EXPECT_THROW(Name{context}, Error<ErrorType::ParseError>);
}

TEST(unittest_Name, character_quadruple){
    // Valid four-byte sequence F1 86 83 84.
    std::vector<char> data(create_char_vector("\"\\f1\\86\\83\\84\""));
    ParserContext context(data);
    Name result(context);
    EXPECT_EQ(context.cursor, data.end());
    EXPECT_TRUE(result.has_value());
    EXPECT_EQ(*result, "\xf1\x86\x83\x84");
}

TEST(unittest_Name, invalid_character_quadruple){
    // 0xf0 is a sequence starter where a continuation byte is required.
    std::vector<char> data(create_char_vector("\"\\f1\\86\\82\\f0\""));
    ParserContext context(data);
    EXPECT_THROW(Name{context}, Error<ErrorType::ParseError>);
}

TEST(unittest_Name, invalid_character_quadruple_big){
    // F4 93 82 84 would encode a code point above U+10FFFF.
    std::vector<char> data(create_char_vector("\"\\f4\\93\\82\\84\""));
    ParserContext context(data);
    EXPECT_THROW(Name{context}, Error<ErrorType::ParseError>);
}
/*
    pbrt source code Copyright(c) 1998-2010 Matt Pharr and Greg Humphreys.

    This file is part of pbrt.

    pbrt is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.  Note that the text contents of
    the book "Physically Based Rendering" are *not* licensed under the
    GNU GPL.

    pbrt is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.

 */

// core/camera.cpp*
#include "stdafx.h"
#include "camera.h"
#include "film.h"
#include "montecarlo.h"
#include "sampler.h"

// Camera Method Definitions

// The camera owns its film; deleting it here ties the two lifetimes together.
Camera::~Camera() {
    delete film;
}

// Base-class constructor: stores the camera-to-world transform, the shutter
// interval [sopen, sclose] and takes ownership of the film.
Camera::Camera(const AnimatedTransform &cam2world,
               float sopen, float sclose, Film *f)
    : CameraToWorld(cam2world), shutterOpen(sopen), shutterClose(sclose) {
    film = f;
    // Scale factors in the camera-to-world transform violate assumptions made
    // throughout the system, so warn loudly but keep going.
    if (CameraToWorld.HasScale())
        Warning("Scaling detected in world-to-camera transformation!\n"
                "The system has numerous assumptions, implicit and explicit,\n"
                "that this transform will have no scale factors in it.\n"
                "Proceed at your own risk; your image may have errors or\n"
                "the system may crash as a result of this.");
}

// Generates the main ray for `sample` plus the rays for the samples one pixel
// over in x and in y, stored in *rd as ray differentials.  Returns the weight
// of the main ray, or 0 if either offset ray has zero weight.
float Camera::GenerateRayDifferential(const CameraSample &sample,
                                      RayDifferential *rd) const {
    float wt = GenerateRay(sample, rd);
    // Find ray after shifting one pixel in the $x$ direction
    CameraSample sshift = sample;
    ++(sshift.imageX);
    Ray rx;
    float wtx = GenerateRay(sshift, &rx);
    rd->rxOrigin = rx.o;
    rd->rxDirection = rx.d;

    // Find ray after shifting one pixel in the $y$ direction
    // (undo the x shift first so only y differs from the original sample).
    --(sshift.imageX);
    ++(sshift.imageY);
    Ray ry;
    float wty = GenerateRay(sshift, &ry);
    rd->ryOrigin = ry.o;
    rd->ryDirection = ry.d;
    if (wtx == 0.f || wty == 0.f) return 0.f;
    rd->hasDifferentials = true;
    return wt;
}

// Projective camera: adds a screen-space projection `proj`, the screen window
// [x0, x1, y0, y1], and thin-lens depth-of-field parameters (lens radius and
// focal distance) on top of the Camera base.
ProjectiveCamera::ProjectiveCamera(const AnimatedTransform &cam2world,
        const Transform &proj, const float screenWindow[4], float sopen,
        float sclose, float lensr, float focald, Film *f)
    : Camera(cam2world, sopen, sclose, f) {
    // Initialize depth of field parameters
    lensRadius = lensr;
    focalDistance = focald;

    // Compute projective camera transformations
    CameraToScreen = proj;

    // Compute projective camera screen transformations
    // The y scale divides by (screenWindow[2] - screenWindow[3]) and the
    // translation subtracts screenWindow[3], which inverts y so that raster
    // coordinates increase downward.
    ScreenToRaster = Scale(float(film->xResolution),
                           float(film->yResolution), 1.f) *
        Scale(1.f / (screenWindow[1] - screenWindow[0]),
              1.f / (screenWindow[2] - screenWindow[3]), 1.f) *
        Translate(Vector(-screenWindow[0], -screenWindow[3], 0.f));
    RasterToScreen = Inverse(ScreenToRaster);
    RasterToCamera = Inverse(CameraToScreen) * RasterToScreen;
}
/*
 * Copyright (C) Volition, Inc. 1999.  All rights reserved.
 *
 * All source code herein is the property of Volition, Inc. You may not sell
 * or otherwise commercially exploit the source or things you created based on the
 * source.
 *
 */

#include "freespace.h"
#include "network/multi.h"
#include "object/objcollide.h"
#include "object/object.h"
#include "scripting/scripting.h"
#include "scripting/api/objs/vecmath.h"
#include "ship/ship.h"
#include "stats/scoring.h"
#include "weapon/weapon.h"

/**
 * Checks weapon-weapon collisions.
 *
 * Handles early rejection (same parent, friendly missiles flying the same
 * way, unarmed bombs), then swept-sphere collision, scripting overrides,
 * mutual damage between weapons with hitpoints, and scoring.
 *
 * @param pair obj_pair pointer to the two objects. pair->a and pair->b are weapons.
 * @return 1 if all future collisions between these can be ignored
 */
int collide_weapon_weapon( obj_pair * pair )
{
	float A_radius, B_radius;
	object *A = pair->a;
	object *B = pair->b;

	Assert( A->type == OBJ_WEAPON );
	Assert( B->type == OBJ_WEAPON );

	// Don't allow ship to shoot down its own missile.
	if (A->parent_sig == B->parent_sig)
		return 1;

	// Only shoot down teammate's missile if not traveling in nearly same direction.
	if (Weapons[A->instance].team == Weapons[B->instance].team)
		if (vm_vec_dot(&A->orient.vec.fvec, &B->orient.vec.fvec) > 0.7f)
			return 1;

	// Ignore collisions involving a bomb if the bomb is not yet armed.
	weapon *wpA, *wpB;
	weapon_info *wipA, *wipB;

	wpA = &Weapons[A->instance];
	wpB = &Weapons[B->instance];
	wipA = &Weapon_info[wpA->weapon_info_index];
	wipB = &Weapon_info[wpB->weapon_info_index];

	A_radius = A->radius;
	B_radius = B->radius;

	// Weapons with hitpoints (bombs) get an enlarged radius and an arming
	// delay: returning 0 keeps the pair alive for later re-checks.
	if (wipA->weapon_hitpoints > 0) {
		if (!(wipA->wi_flags[Weapon::Info_Flags::No_radius_doubling])) {
			A_radius *= 2;		// Makes bombs easier to hit
		}

		// Locked-homing bombs use max_lifetime for the arming window when the
		// aspect-invulnerability fix is enabled; otherwise plain lifetime.
		if ((The_mission.ai_profile->flags[AI::Profile_Flags::Aspect_invulnerability_fix]) && (wipA->is_locked_homing()) && (wpA->homing_object != &obj_used_list)) {
			if ( (wipA->max_lifetime - wpA->lifeleft) < The_mission.ai_profile->delay_bomb_arm_timer[Game_skill_level] )
				return 0;
		} else if ( (wipA->lifetime - wpA->lifeleft) < The_mission.ai_profile->delay_bomb_arm_timer[Game_skill_level] )
			return 0;
	}

	// Same arming / radius handling for weapon B.
	if (wipB->weapon_hitpoints > 0) {
		if (!(wipB->wi_flags[Weapon::Info_Flags::No_radius_doubling])) {
			B_radius *= 2;		// Makes bombs easier to hit
		}

		if ((The_mission.ai_profile->flags[AI::Profile_Flags::Aspect_invulnerability_fix]) && (wipB->is_locked_homing()) && (wpB->homing_object != &obj_used_list)) {
			if ( (wipB->max_lifetime - wpB->lifeleft) < The_mission.ai_profile->delay_bomb_arm_timer[Game_skill_level] )
				return 0;
		} else if ( (wipB->lifetime - wpB->lifeleft) < The_mission.ai_profile->delay_bomb_arm_timer[Game_skill_level] )
			return 0;
	}

	// Rats, do collision detection.
	if (collide_subdivide(&A->last_pos, &A->pos, A_radius, &B->last_pos, &B->pos, B_radius)) {
		// Ask the scripting system whether either side overrides the default
		// collision handling (hooks are set up once from A's and once from
		// B's point of view).
		Script_system.SetHookObjects(4, "Self", A, "Object", B, "Weapon", A, "WeaponB", B);
		Script_system.SetHookVar("Hitpos", 'o', scripting::api::l_Vector.Set(B->pos));
		bool a_override = Script_system.IsConditionOverride(CHA_COLLIDEWEAPON, A);
		Script_system.RemHookVars({"Self", "Object", "Weapon", "WeaponB", "Hitpos" });

		// Yes, this should be reversed.
		Script_system.SetHookObjects(4, "Self", B, "Object", A, "Weapon", B, "WeaponB", A);
		Script_system.SetHookVar("Hitpos", 'o', scripting::api::l_Vector.Set(A->pos));
		bool b_override = Script_system.IsConditionOverride(CHA_COLLIDEWEAPON, B);
		Script_system.RemHookVars({ "Self", "Object", "Weapon", "WeaponB", "Hitpos" });

		if(!a_override && !b_override) {
			// Damage each weapon deals, filtered through the victim's armor.
			float aDamage = wipA->damage;
			if (wipB->armor_type_idx >= 0)
				aDamage = Armor_types[wipB->armor_type_idx].GetDamage(aDamage, wipA->damage_type_idx, 1.0f);

			float bDamage = wipB->damage;
			if (wipA->armor_type_idx >= 0)
				bDamage = Armor_types[wipA->armor_type_idx].GetDamage(bDamage, wipB->damage_type_idx, 1.0f);

			if (wipA->weapon_hitpoints > 0) {
				if (wipB->weapon_hitpoints > 0) {
					// Two bombs collide, detonate both.
					if ((wipA->wi_flags[Weapon::Info_Flags::Bomb]) && (wipB->wi_flags[Weapon::Info_Flags::Bomb])) {
						wpA->lifeleft = 0.01f;
						wpB->lifeleft = 0.01f;
						wpA->weapon_flags.set(Weapon::Weapon_Flags::Destroyed_by_weapon);
						wpB->weapon_flags.set(Weapon::Weapon_Flags::Destroyed_by_weapon);
					} else {
						A->hull_strength -= bDamage;
						B->hull_strength -= aDamage;

						// safety to make sure either of the weapons die - allow 'bulkier' to keep going
						if ((A->hull_strength > 0.0f) && (B->hull_strength > 0.0f)) {
							if (wipA->weapon_hitpoints > wipB->weapon_hitpoints) {
								B->hull_strength = -1.0f;
							} else {
								A->hull_strength = -1.0f;
							}
						}

						if (A->hull_strength < 0.0f) {
							wpA->lifeleft = 0.01f;
							wpA->weapon_flags.set(Weapon::Weapon_Flags::Destroyed_by_weapon);
						}
						if (B->hull_strength < 0.0f) {
							wpB->lifeleft = 0.01f;
							wpB->weapon_flags.set(Weapon::Weapon_Flags::Destroyed_by_weapon);
						}
					}
				} else {
					// Only A has hitpoints: B dies outright, A takes damage.
					A->hull_strength -= bDamage;
					wpB->lifeleft = 0.01f;
					wpB->weapon_flags.set(Weapon::Weapon_Flags::Destroyed_by_weapon);
					if (A->hull_strength < 0.0f) {
						wpA->lifeleft = 0.01f;
						wpA->weapon_flags.set(Weapon::Weapon_Flags::Destroyed_by_weapon);
					}
				}
			} else if (wipB->weapon_hitpoints > 0) {
				// Only B has hitpoints: A dies outright, B takes damage.
				B->hull_strength -= aDamage;
				wpA->lifeleft = 0.01f;
				wpA->weapon_flags.set(Weapon::Weapon_Flags::Destroyed_by_weapon);
				if (B->hull_strength < 0.0f) {
					wpB->lifeleft = 0.01f;
					wpB->weapon_flags.set(Weapon::Weapon_Flags::Destroyed_by_weapon);
				}
			}

			// single player and multiplayer masters evaluate the scoring and kill stuff
			if (!MULTIPLAYER_CLIENT) {
				// If bomb was destroyed, do scoring
				if (wipA->wi_flags[Weapon::Info_Flags::Bomb]) {
					//Update stats. -Halleck
					scoring_eval_hit(A, B, 0);
					if (wpA->weapon_flags[Weapon::Weapon_Flags::Destroyed_by_weapon]) {
						scoring_eval_kill_on_weapon(A, B);
					}
				}
				if (wipB->wi_flags[Weapon::Info_Flags::Bomb]) {
					//Update stats. -Halleck
					scoring_eval_hit(B, A, 0);
					if (wpB->weapon_flags[Weapon::Weapon_Flags::Destroyed_by_weapon]) {
						scoring_eval_kill_on_weapon(B, A);
					}
				}
			}
		}

		// NOTE(review): this guard is `!(b_override && !a_override)`, i.e. the
		// A-side hook runs unless B overrides while A does not — it is not the
		// symmetric `!b_override`.  Looks intentional but worth confirming
		// against the scripting documentation.
		if(!(b_override && !a_override)) {
			Script_system.SetHookObjects(4, "Self", A, "Object", B, "Weapon", A, "WeaponB", B);
			Script_system.SetHookVar("Hitpos", 'o', scripting::api::l_Vector.Set(B->pos));
			Script_system.RunCondition(CHA_COLLIDEWEAPON, A, wpA->weapon_info_index);
			Script_system.RemHookVars({ "Self", "Object", "Weapon", "WeaponB", "Hitpos" });
		} else {
			// Yes, this should be reversed.
			Script_system.SetHookObjects(4, "Self", B, "Object", A, "Weapon", B, "WeaponB", A);
			Script_system.SetHookVar("Hitpos", 'o', scripting::api::l_Vector.Set(A->pos));
			Script_system.RunCondition(CHA_COLLIDEWEAPON, B, wpB->weapon_info_index);
			Script_system.RemHookVars({ "Self", "Object", "Weapon", "WeaponB", "Hitpos" });
		}

		return 1;
	}

	return 0;
}
#include <test.hpp> #include <protozero/types.hpp> TEST_CASE("default constructed data_view") { protozero::data_view view; REQUIRE(view.data() == nullptr); REQUIRE(view.size() == 0); } TEST_CASE("data_view from C string") { protozero::data_view view{"foobar"}; REQUIRE(view.data()); REQUIRE(view.size() == 6); } TEST_CASE("data_view from std::string") { std::string str{"foobar"}; protozero::data_view view{str}; REQUIRE(view.data()); REQUIRE(view.size() == 6); } TEST_CASE("data_view from ptr, size") { std::string str{"foobar"}; protozero::data_view view{str.data(), str.size()}; REQUIRE(view.data()); REQUIRE(view.size() == 6); } TEST_CASE("convert data_view to std::string") { protozero::data_view view{"foobar"}; std::string s = std::string(view); REQUIRE(s == "foobar"); REQUIRE(std::string(view) == "foobar"); REQUIRE(view.to_string() == "foobar"); } TEST_CASE("converting default constructed data_view to string fails") { protozero::data_view view; REQUIRE_THROWS_AS({ view.to_string(); }, assert_error); } TEST_CASE("swapping data_view") { protozero::data_view view1{"foo"}; protozero::data_view view2{"bar"}; REQUIRE(view1.to_string() == "foo"); REQUIRE(view2.to_string() == "bar"); using std::swap; swap(view1, view2); REQUIRE(view2.to_string() == "foo"); REQUIRE(view1.to_string() == "bar"); } TEST_CASE("comparing data_views") { protozero::data_view v1{"foo"}; protozero::data_view v2{"bar"}; protozero::data_view v3{"foox"}; protozero::data_view v4{"foo"}; REQUIRE_FALSE(v1 == v2); REQUIRE_FALSE(v1 == v3); REQUIRE(v1 == v4); REQUIRE_FALSE(v2 == v3); REQUIRE_FALSE(v2 == v4); REQUIRE_FALSE(v3 == v4); REQUIRE(v1 != v2); REQUIRE(v1 != v3); REQUIRE_FALSE(v1 != v4); REQUIRE(v2 != v3); REQUIRE(v2 != v4); REQUIRE(v3 != v4); }
/* ************************************************************************** */ /* */ /* ::: :::::::: */ /* FragTrap.cpp :+: :+: :+: */ /* +:+ +:+ +:+ */ /* By: graja <graja@student.42wolfsburg.de> +#+ +:+ +#+ */ /* +#+#+#+#+#+ +#+ */ /* Created: 2022/02/12 13:31:19 by graja #+# #+# */ /* Updated: 2022/02/17 11:21:21 by graja ### ########.fr */ /* */ /* ************************************************************************** */ #include "FragTrap.hpp" //Constructors FragTrap::FragTrap(void) : ClapTrap() { std::cout << "FragTrap default constructor called" << std::endl; this->_hitpts = 100; this->_energypts = 100; this->_attckdmg = 30; std::cout << _hitpts << ", " << _energypts << ", "; std::cout << _attckdmg << std::endl; std::cout << std::endl; } FragTrap::FragTrap(std::string const name) : ClapTrap(name) { std::cout << "FragTrap constructor called with name, " << name << std::endl; this->_hitpts = 100; this->_energypts = 100; this->_attckdmg = 30; std::cout << _hitpts << ", " << _energypts << ", "; std::cout << _attckdmg << std::endl; std::cout << std::endl; } FragTrap::FragTrap(const FragTrap &cpy) : ClapTrap(cpy) { std::cout << "FragTrap copy constructor called" << std::endl; *this = cpy; std::cout << _hitpts << ", " << _energypts << ", "; std::cout << _attckdmg << std::endl; std::cout << std::endl; } //Destructor FragTrap::~FragTrap(void) { std::cout << "FragTrap destructor called" << std::endl; std::cout << this->_name << " says good bye" << std::endl; std::cout << std::endl; } //Operator overload for '=' FragTrap& FragTrap::operator=(const FragTrap &ovr) { this->_name = ovr._name; this->_hitpts = ovr._hitpts; this->_energypts = ovr._energypts; this->_attckdmg = ovr._attckdmg; return (*this); } //Member functions void FragTrap::highFivesGuys(void) { std::cout << "FragTrap HighFivesGuys" << std::endl; std::cout << this->_name << " requests for a HighFive!" << std::endl; std::cout << std::endl; }
/********************************************************************************* * * Inviwo - Interactive Visualization Workshop * * Copyright (c) 2014-2017 Inviwo Foundation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* *********************************************************************************/ #include <inviwo/core/datastructures/image/layerdisk.h> namespace inviwo { LayerDisk::LayerDisk(LayerType type, const SwizzleMask& swizzleMask) : LayerRepresentation(size2_t(0), type, DataFormatBase::get()) , DiskRepresentation<LayerRepresentation>() , swizzleMask_(swizzleMask) {} LayerDisk::LayerDisk(std::string url, LayerType type, const SwizzleMask& swizzleMask) : LayerRepresentation(size2_t(0), type, DataFormatBase::get()) , DiskRepresentation<LayerRepresentation>(url) , swizzleMask_(swizzleMask) {} LayerDisk::LayerDisk(const LayerDisk& rhs) : LayerRepresentation(rhs) , DiskRepresentation<LayerRepresentation>(rhs) , swizzleMask_(rhs.swizzleMask_) {} LayerDisk& LayerDisk::operator=(const LayerDisk& that) { if (this != &that) { LayerRepresentation::operator=(that); DiskRepresentation<LayerRepresentation>::operator=(that); swizzleMask_ = that.swizzleMask_; } return *this; } LayerDisk::~LayerDisk() {} LayerDisk* LayerDisk::clone() const { return new LayerDisk(*this); } void LayerDisk::setDimensions(size2_t dimensions) { dimensions_ = dimensions; updateBaseMetaFromRepresentation(); } bool LayerDisk::copyRepresentationsTo(LayerRepresentation*) const { return false; } void LayerDisk::updateDataFormat(const DataFormatBase* format) { setDataFormat(format); } std::type_index LayerDisk::getTypeIndex() const { return std::type_index(typeid(LayerDisk)); } void LayerDisk::setSwizzleMask(const SwizzleMask &mask) { swizzleMask_ = mask; updateBaseMetaFromRepresentation(); } SwizzleMask LayerDisk::getSwizzleMask() const { return swizzleMask_; } } // namespace
#include "fslock.h"

#include <fcntl.h>
#include <semaphore.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

#include "3rd-party/catch.hpp"
#include "test-helpers/tempdir.h"
#include "test-helpers/tempfile.h"

using namespace newsboat;

// Forks and calls FsLock::try_lock() in the child process.
//
// Two named POSIX semaphores coordinate the processes: `sem_start` is posted
// by the child once it holds the lock, `sem_stop` is posted by the parent
// when the child may release it and exit.
// NOTE(review): the semaphore names are global on the host, so two instances
// of this test suite running in parallel would collide — acceptable for a
// test helper, but worth knowing.
class LockProcess {
public:
	LockProcess(std::string lock_location)
	{
		// Create (or attach to) both rendezvous semaphores, initial count 0.
		sem_start = sem_open(sem_start_name, O_CREAT, 0644, 0);
		sem_stop = sem_open(sem_stop_name, O_CREAT, 0644, 0);
		pid = ::fork();
		if (pid == -1) {
			FAIL("LockProcess: fork() failed");
		} else if (pid > 0) {
			// Parent process: Wait until child process has finished calling try_lock
			sem_wait(sem_start);
		} else {
			// Child process: Call try_lock, signal parent to continue, wait for parent process signal to stop
			pid_t ignore_pid;
			FsLock lock;
			std::string error_message;
			lock.try_lock(lock_location, ignore_pid, error_message);
			sem_post(sem_start);
			sem_wait(sem_stop);
			// Exit directly without running destructors (making sure we don't interfere with the original process)
			_exit(0);
		}
	}

	pid_t get_child_pid()
	{
		return pid;
	}

	~LockProcess()
	{
		// Only the parent tears the child down; the child never returns from
		// the constructor (it calls _exit(0)).
		if (pid > 0) {
			// Parent process: Signal child process to exit and wait for it to finish
			sem_post(sem_stop);
			::waitpid(pid, nullptr, 0);
			REQUIRE(sem_unlink(sem_start_name) == 0);
			REQUIRE(sem_unlink(sem_stop_name) == 0);
			REQUIRE(sem_close(sem_start) == 0);
			REQUIRE(sem_close(sem_stop) == 0);
		}
	}

private:
	const char* sem_start_name = "/newsboat-test-fslock-start";
	const char* sem_stop_name = "/newsboat-test-fslock-stop";
	pid_t pid;
	sem_t* sem_start;
	sem_t* sem_stop;
};

TEST_CASE("try_lock() returns an error if lock-file permissions or location are invalid",
	"[FsLock]")
{
	GIVEN("An invalid lock location") {
		const TestHelpers::TempDir test_directory;
		const std::string non_existing_dir =
			test_directory.get_path() + "does-not-exist/";
		const std::string lock_location = non_existing_dir + "lockfile";

		THEN("try_lock() will fail and return pid == 0") {
			FsLock lock;
			pid_t pid = -1;
			// try_lock() is expected to fail as the relevant directory does not exist
			std::string error_message;
			REQUIRE_FALSE(lock.try_lock(lock_location, pid, error_message));
			REQUIRE(pid == 0);
			REQUIRE(error_message.length() > 0);
		}
	}

	GIVEN("A lock file which does not grant write access") {
		const TestHelpers::TempFile lock_location;
		// Create the file read-only (0400) so opening it for writing fails.
		const int fd = ::open(lock_location.get_path().c_str(),
				O_RDWR | O_CREAT, 0400);
		::close(fd);

		THEN("try_lock() will fail and return pid == 0") {
			FsLock lock;
			pid_t pid = -1;
			std::string error_message;
			REQUIRE_FALSE(lock.try_lock(lock_location.get_path(), pid,
					error_message));
			REQUIRE(pid == 0);
			REQUIRE(error_message.length() > 0);
		}
	}
}

TEST_CASE("try_lock() fails if lock was already created", "[FsLock]")
{
	const TestHelpers::TempFile lock_location;

	WHEN("A different process has called try_lock()") {
		// The helper's constructor only returns once the child holds the lock.
		LockProcess lock_process(lock_location.get_path());
		FsLock lock;
		pid_t pid = 0;

		THEN("Calling try_lock() for the same lock location will fail") {
			std::string error_message;
			REQUIRE_FALSE(lock.try_lock(lock_location.get_path(), pid,
					error_message));
		}

		THEN("try_lock() returns the pid of the process holding the lock") {
			std::string error_message;
			REQUIRE_FALSE(lock.try_lock(lock_location.get_path(), pid,
					error_message));
			REQUIRE(pid == lock_process.get_child_pid());
		}
	}
}

TEST_CASE("try_lock() succeeds if lock file location is valid and not locked by a different process",
	"[FsLock]")
{
	const TestHelpers::TempFile lock_location;
	FsLock lock;
	pid_t pid = 0;
	std::string error_message;
	REQUIRE(lock.try_lock(lock_location.get_path(), pid, error_message));

	SECTION("The lock file exists after a call to try_lock()") {
		REQUIRE(0 == ::access(lock_location.get_path().c_str(), F_OK));
	}

	SECTION("Calling try_lock() a second time for the same location succeeds") {
		REQUIRE(lock.try_lock(lock_location.get_path(), pid, error_message));
	}

	SECTION("Calling try_lock() a second time with a different location succeeds and cleans up old lock file") {
		const TestHelpers::TempFile new_lock_location;
		REQUIRE(0 == ::access(lock_location.get_path().c_str(), F_OK));
		REQUIRE(lock.try_lock(new_lock_location.get_path(), pid,
				error_message));
		// The old lock file must have been removed, the new one created.
		REQUIRE(0 != ::access(lock_location.get_path().c_str(), F_OK));
		REQUIRE(0 == ::access(new_lock_location.get_path().c_str(), F_OK));
	}
}
// Copyright 2016 The TensorFlow Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// =============================================================================
// FinishedNodes returns a 1-D tensor listing the nodes that are finished
// accumulating.

#include "tensorflow/contrib/tensor_forest/kernels/tree_utils.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/kernels/bounds_check.h"
#include "tensorflow/core/lib/random/simple_philox.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/util/work_sharder.h"

namespace tensorflow {

using std::placeholders::_1;
using std::placeholders::_2;

using tensorforest::CheckTensorBounds;
using tensorforest::BestSplitDominatesClassificationBootstrap;
using tensorforest::BestSplitDominatesClassificationChebyshev;
using tensorforest::BestSplitDominatesClassificationHoeffding;
using tensorforest::BestSplitDominatesRegression;

namespace {

// Inputs and thresholds shared by every shard of the Evaluate() work.
// Tensors are held by value (Tensor is a refcounted handle).
struct EvaluateParams {
  Tensor leaves;
  Tensor node_to_accumulator;
  Tensor accumulator_sums;
  Tensor birth_epochs;
  int current_epoch;
  int32 num_split_after_samples;
  int32 min_split_samples;
  int32 check_dominates_every_samples;
  bool need_random;
  int64 random_seed;
  std::function<bool(int, random::SimplePhilox*)> dominate_method;
};

// Processes leaves [start, end) and merges the locally collected
// finished/stale leaf ids into the shared output sets under *mutex.
// Runs concurrently on several shards via Shard().
void Evaluate(const EvaluateParams& params, mutex* mutex,
              int32 start, int32 end,
              std::unordered_set<int32>* final_finished_leaves,
              std::unordered_set<int32>* final_stale) {
  const auto leaves = params.leaves.unaligned_flat<int32>();
  const auto node_map = params.node_to_accumulator.unaligned_flat<int32>();
  const auto sums = params.accumulator_sums.tensor<float, 2>();
  const auto start_epochs = params.birth_epochs.unaligned_flat<int32>();

  const int32 num_accumulators =
      static_cast<int32>(params.accumulator_sums.shape().dim_size(0));

  // Collected locally, merged under the mutex at the end to keep the
  // critical section small.
  std::vector<int32> finished_leaves;
  std::vector<int32> stale;

  // Only the bootstrap dominate method needs randomness.
  std::unique_ptr<random::SimplePhilox> simple_philox;
  random::PhiloxRandom rnd_gen(params.random_seed);

  if (params.need_random) {
    simple_philox.reset(new random::SimplePhilox(&rnd_gen));
  }

  std::unordered_set<int32> visited;
  for (int32 i = start; i < end; i++) {
    const int32 leaf = internal::SubtleMustCopy(leaves(i));
    // -1 marks an unused slot; skip leaves already handled in this shard.
    // NOTE(review): visited.insert() only happens at the bottom of the loop,
    // so leaves that take one of the `continue` paths can be re-examined if
    // they appear again — harmless for correctness (the output sets dedupe)
    // but possibly redundant work; confirm this is intended.
    if (leaf == -1 || visited.find(leaf) != visited.end()) {
      continue;
    }
    // NOTE(review): a bounds failure only logs and falls through; the value
    // is still used below — presumably guaranteed valid upstream, verify.
    if (!FastBoundsCheck(leaf, node_map.size())) {
      LOG(ERROR) << "leaf " << leaf << " not in valid range.";
    }
    const int32 accumulator = internal::SubtleMustCopy(node_map(leaf));
    // Negative means this leaf has no accumulator assigned.
    if (accumulator < 0) {
      continue;
    }
    if (!FastBoundsCheck(accumulator, num_accumulators)) {
      LOG(ERROR) << "accumulator " << accumulator << " not in valid range.";
    }
    // The first column holds the number of samples seen.
    // For classification, this should be the sum of the other columns.
    int32 count = sums(accumulator, 0);

    // Leaves born more than one epoch ago are wrapped up: finished if they
    // have enough samples to split, otherwise reported as stale.
    if (params.current_epoch > start_epochs(leaf) + 1) {
      if (count >= params.min_split_samples) {
        finished_leaves.push_back(leaf);
      } else {
        stale.push_back(leaf);
      }
      continue;
    }

    // Hard cap: enough samples seen, finished regardless of dominance.
    if (count >= params.num_split_after_samples) {
      finished_leaves.push_back(leaf);
      continue;
    }

    // Not enough samples yet to even consider splitting.
    if (count < params.min_split_samples) {
      continue;
    }

    // Only run the (expensive) dominance test every
    // check_dominates_every_samples samples.
    if (count % params.check_dominates_every_samples != 0) {
      continue;
    }

    bool finished = params.dominate_method(accumulator, simple_philox.get());
    if (finished) {
      finished_leaves.push_back(leaf);
    }
    visited.insert(leaf);
  }
  // Merge this shard's results into the shared outputs.
  mutex_lock m(*mutex);
  final_finished_leaves->insert(finished_leaves.begin(),
                                finished_leaves.end());
  final_stale->insert(stale.begin(), stale.end());
}
}  // namespace

// Op kernel: given the leaves each input landed in plus per-accumulator
// statistics, outputs (0) the leaf ids that are finished accumulating and
// (1) the leaf ids that went stale.  The per-leaf evaluation is sharded
// across the CPU worker threads.
class FinishedNodes : public OpKernel {
 public:
  explicit FinishedNodes(OpKernelConstruction* context)
      : OpKernel(context) {
    OP_REQUIRES_OK(context, context->GetAttr(
        "regression", &regression_));
    OP_REQUIRES_OK(context, context->GetAttr(
        "num_split_after_samples", &num_split_after_samples_));
    OP_REQUIRES_OK(context, context->GetAttr(
        "min_split_samples", &min_split_samples_));
    OP_REQUIRES_OK(context, context->GetAttr(
        "dominate_fraction", &dominate_fraction_));
    OP_REQUIRES_OK(context,
                   context->GetAttr("dominate_method", &dominate_method_));
    OP_REQUIRES_OK(context, context->GetAttr("random_seed", &random_seed_));
    OP_REQUIRES_OK(context, context->GetAttr("check_dominates_every_samples",
                                             &check_dominates_every_samples_));
  }

  void Compute(OpKernelContext* context) override {
    const Tensor& leaf_tensor = context->input(0);
    const Tensor& node_to_accumulator = context->input(1);
    const Tensor& split_sums = context->input(2);
    const Tensor& split_squares = context->input(3);
    const Tensor& accumulator_sums = context->input(4);
    const Tensor& accumulator_squares = context->input(5);
    const Tensor& birth_epochs = context->input(6);
    const Tensor& current_epoch = context->input(7);

    // Validate input ranks and matching sizes before touching the data.
    OP_REQUIRES(context, leaf_tensor.shape().dims() == 1,
                errors::InvalidArgument(
                    "leaf_tensor should be one-dimensional"));
    OP_REQUIRES(context, node_to_accumulator.shape().dims() == 1,
                errors::InvalidArgument(
                    "node_to_accumulator should be one-dimensional"));
    OP_REQUIRES(context, split_sums.shape().dims() == 3,
                errors::InvalidArgument(
                    "split_sums should be three-dimensional"));
    OP_REQUIRES(context, accumulator_sums.shape().dims() == 2,
                errors::InvalidArgument(
                    "accumulator_sums should be two-dimensional"));
    OP_REQUIRES(context, birth_epochs.shape().dims() == 1,
                errors::InvalidArgument(
                    "birth_epochs should be one-dimensional"));
    OP_REQUIRES(
        context,
        birth_epochs.shape().dim_size(0) ==
        node_to_accumulator.shape().dim_size(0),
        errors::InvalidArgument(
            "birth_epochs and node_to_accumulator should be the same size."));

    // Check tensor bounds.
    if (!CheckTensorBounds(context, leaf_tensor)) return;
    if (!CheckTensorBounds(context, node_to_accumulator)) return;
    if (!CheckTensorBounds(context, split_sums)) return;
    if (!CheckTensorBounds(context, split_squares)) return;
    if (!CheckTensorBounds(context, accumulator_sums)) return;
    if (!CheckTensorBounds(context, accumulator_squares)) return;
    if (!CheckTensorBounds(context, birth_epochs)) return;
    if (!CheckTensorBounds(context, current_epoch)) return;

    const int32 epoch = current_epoch.unaligned_flat<int32>()(0);

    const int32 num_leaves = static_cast<int32>(
        leaf_tensor.shape().dim_size(0));

    auto worker_threads = context->device()->tensorflow_cpu_worker_threads();
    int num_threads = worker_threads->num_threads;

    EvaluateParams params;
    params.leaves = leaf_tensor;
    params.node_to_accumulator = node_to_accumulator;
    params.accumulator_sums = accumulator_sums;
    params.birth_epochs = birth_epochs;
    params.current_epoch = epoch;
    params.min_split_samples = min_split_samples_;
    params.num_split_after_samples = num_split_after_samples_;
    params.need_random = false;
    params.check_dominates_every_samples = check_dominates_every_samples_;

    // Bind the configured dominance test; only the remaining arguments
    // (accumulator id and, for bootstrap, the RNG) vary per call.
    if (regression_) {
      params.dominate_method =
          std::bind(&BestSplitDominatesRegression,
                    accumulator_sums, accumulator_squares, split_sums,
                    split_squares, _1);
    } else {
      if (dominate_method_ == "none") {
        // Never declare a node finished via dominance.
        params.dominate_method = [](int, random::SimplePhilox*) {
          return false;
        };
      } else if (dominate_method_ == "hoeffding") {
        params.dominate_method =
            std::bind(&BestSplitDominatesClassificationHoeffding,
                      accumulator_sums, split_sums, _1, dominate_fraction_);
      } else if (dominate_method_ == "chebyshev") {
        params.dominate_method =
            std::bind(&BestSplitDominatesClassificationChebyshev,
                      accumulator_sums, split_sums, _1, dominate_fraction_);
      } else if (dominate_method_ == "bootstrap") {
        params.need_random = true;

        // Seed 0 means "pick a fresh seed from the wall clock".
        params.random_seed = random_seed_;
        if (params.random_seed == 0) {
          params.random_seed = static_cast<uint64>(Env::Default()->NowMicros());
        }

        params.dominate_method =
            std::bind(&BestSplitDominatesClassificationBootstrap,
                      accumulator_sums, split_sums, _1, dominate_fraction_,
                      _2);
      } else {
        LOG(FATAL) << "Unknown dominate method " << dominate_method_;
      }
    }

    std::unordered_set<int32> finished_leaves;
    std::unordered_set<int32> stale;
    mutex m;
    // Require at least 100 leaves per thread.  I guess that's about 800 cost
    // per unit.  This isn't well defined.
    const int64 costPerUnit = 800;
    auto work = [&params, &finished_leaves, &stale, &m, num_leaves](
        int64 start, int64 end) {
      CHECK(start <= end);
      CHECK(end <= num_leaves);
      Evaluate(params, &m, static_cast<int32>(start),
               static_cast<int32>(end), &finished_leaves, &stale);
    };
    Shard(num_threads, worker_threads->workers, num_leaves, costPerUnit, work);

    // Copy to output.
    Tensor* output_finished = nullptr;
    TensorShape finished_shape;
    finished_shape.AddDim(finished_leaves.size());
    OP_REQUIRES_OK(context,
                   context->allocate_output(0, finished_shape,
                                            &output_finished));
    auto out_finished = output_finished->unaligned_flat<int32>();
    std::copy(finished_leaves.begin(), finished_leaves.end(),
              out_finished.data());

    Tensor* output_stale = nullptr;
    TensorShape stale_shape;
    stale_shape.AddDim(stale.size());
    OP_REQUIRES_OK(context,
                   context->allocate_output(1, stale_shape,
                                            &output_stale));
    auto out_stale = output_stale->unaligned_flat<int32>();
    std::copy(stale.begin(), stale.end(), out_stale.data());
  }

 private:
  bool regression_;
  int32 num_split_after_samples_;
  int32 min_split_samples_;
  float dominate_fraction_;
  string dominate_method_;
  int32 random_seed_;
  int32 check_dominates_every_samples_;
};

REGISTER_KERNEL_BUILDER(Name("FinishedNodes").Device(DEVICE_CPU),
                        FinishedNodes);

}  // namespace tensorflow
#include <QApplication>
#include "MainWindow.h"

// Application entry point: creates the Qt application object and shows a
// single top-level MainWindow, then hands control to the Qt event loop.
int main(int argc, char *argv[])
{
    QApplication application(argc, argv);

    MainWindow mainWindow;
    mainWindow.setWindowTitle("Facetious");
    mainWindow.show();

    // exec() blocks until the last window closes and returns the exit code.
    return application.exec();
}
#include "CMeshModelInstance.h"
#include <Eigen/Dense>
#include <igl/per_vertex_normals.h>
#include <assert.h>
#include <totalmodel.h>
#include <ceres/rotation.h>
#include <chrono>

// Function equivalent and improved from igl::per_vertex_normals
// (measured roughly [270, 452] ms on the original workload)

// Euclidean norm of a 3-vector stored as three consecutive scalars.
template <typename T>
inline T getNormTriplet(const T* const ptr)
{
    return std::sqrt(ptr[0]*ptr[0] + ptr[1]*ptr[1] + ptr[2]*ptr[2]);
}

// Divide a 3-vector in place by a precomputed norm (no zero check here).
template <typename T>
inline void normalizeTriplet(T* ptr, const T norm)
{
    ptr[0] /= norm;
    ptr[1] /= norm;
    ptr[2] /= norm;
}

// Normalize a 3-vector in place by its own norm.
// NOTE(review): divides by zero (producing NaNs) for a zero vector —
// callers appear to rely on vertices having at least one incident face.
template <typename T>
inline void normalizeTriplet(T* ptr)
{
    const auto norm = getNormTriplet(ptr);
    normalizeTriplet(ptr, norm);
}

// Area-weighted per-vertex normals for a triangle mesh.
// V: (#vertices x 3) row-major positions; F: (#faces x 3) vertex indices;
// N: output (#vertices x 3) row-major unit normals (resized here).
// Faster drop-in replacement for igl::per_vertex_normals using raw pointer
// arithmetic instead of Eigen expressions in the hot loops.
void per_vertex_normals(
    const Eigen::Matrix<double, Eigen::Dynamic, 3, Eigen::RowMajor>& V,
    const Eigen::Matrix<int, Eigen::Dynamic, 3, Eigen::RowMajor>& F,
    Eigen::Matrix<double, Eigen::Dynamic, 3, Eigen::RowMajor>& N
)
{
    // Per-face unit normals.
    Eigen::Matrix<double, Eigen::Dynamic,3, Eigen::RowMajor> FN;
    FN.resize(F.rows(),3);
    auto* FN_data = FN.data();
    const auto* const F_data = F.data();
    const auto* const V_data = V.data();
    // loop over faces: cross product of two edge vectors, then normalize
    for (int i = 0; i < F.rows();i++)
    {
        const auto baseIndex = 3*i;
        // Flat offsets of the three corner vertices into V_data.
        const auto F_data0 = 3*F_data[baseIndex];
        const auto F_data1 = 3*F_data[baseIndex+1];
        const auto F_data2 = 3*F_data[baseIndex+2];
        const Eigen::Matrix<double, 1, 3> v1(
            V_data[F_data1] - V_data[F_data0], V_data[F_data1+1] - V_data[F_data0+1], V_data[F_data1+2] - V_data[F_data0+2]);
        const Eigen::Matrix<double, 1, 3> v2(
            V_data[F_data2] - V_data[F_data0], V_data[F_data2+1] - V_data[F_data0+1], V_data[F_data2+2] - V_data[F_data0+2]);
        FN.row(i) = v1.cross(v2);
        auto* fnRowPtr = &FN_data[baseIndex];
        const double norm = getNormTriplet(fnRowPtr);
        if (norm == 0)
        {
            // Degenerate face: leave a zero normal rather than NaNs.
            fnRowPtr[0] = 0;
            fnRowPtr[1] = 0;
            fnRowPtr[2] = 0;
        }
        else
            normalizeTriplet(fnRowPtr, norm);
    }
    // Resize for output and zero-initialize the accumulators.
    N.resize(V.rows(),3);
    std::fill(N.data(), N.data() + N.rows() * N.cols(), 0.0);
    // Per-face double areas, used as weights when accumulating normals.
    Eigen::Matrix<double, Eigen::Dynamic, 1> A(F.rows(), 1);
    auto* A_data = A.data();
    const auto Fcols = F.cols();
    const auto Vcols = V.cols();
    // Projected (signed) double area of face f onto the (x, y) coordinate plane.
    const auto & proj_doublearea = [&V_data,&F_data, &Vcols, &Fcols](const int x, const int y, const int f) ->double
    {
        const auto baseIndex = f*Fcols;
        const auto baseIndex2 = F_data[baseIndex + 2]*Vcols;
        const auto rx = V_data[F_data[baseIndex]*Vcols + x] - V_data[baseIndex2 + x];
        const auto sx = V_data[F_data[baseIndex + 1]*Vcols + x] - V_data[baseIndex2 + x];
        const auto ry = V_data[F_data[baseIndex]*Vcols + y] - V_data[baseIndex2 + y];
        const auto sy = V_data[F_data[baseIndex + 1]*Vcols + y] - V_data[baseIndex2 + y];
        return rx*sy - ry*sx;
    };
    // 3D double area = norm of the vector of the three projected double areas.
    for (auto f = 0;f<F.rows();f++)
    {
        const auto dblAd1 = proj_doublearea(0,1,f);
        const auto dblAd2 = proj_doublearea(1,2,f);
        const auto dblAd3 = proj_doublearea(2,0,f);
        A_data[f] = std::sqrt(dblAd1*dblAd1 + dblAd2*dblAd2 + dblAd3*dblAd3);
    }
    auto* N_data = N.data();
    // loop over faces, scattering each area-weighted face normal to its corners
    for (int i = 0 ; i < F.rows();i++)
    {
        const auto baseIndex = i*Fcols;
        // throw normal at each corner
        for (int j = 0; j < 3;j++)
        {
            // auto* nRowPtr = &N_data[3*F(i,j)];
            auto* nRowPtr = &N_data[3*F_data[baseIndex + j]];
            const auto* const fnRowPtr = &FN_data[3*i];
            for (int subIndex = 0; subIndex < FN.cols(); subIndex++)
                nRowPtr[subIndex] += A_data[i] * fnRowPtr[subIndex];
            // Vector equivalent:
            // N.row(F(i,j)) += A_data[i] * FN.row(i);
        }
    }
    // take average via normalization (loop over vertices)
    for (int i = 0;i<N.rows();i++)
        normalizeTriplet(&N_data[3*i]);
    // Matrix equivalent:
    // N.rowwise().normalize();
}

// Rebuilds m_normals from m_vertices using the mesh topology in `model`.
// Throws for MESH_TYPE_SMPL, which is currently unsupported.
void CMeshModelInstance::RecomputeNormal(const TotalModel& model)
{
    // Pack m_vertices (cv::Point3f) into a row-major Eigen matrix.
    Eigen::Matrix<double, Eigen::Dynamic, 3, Eigen::RowMajor> V_3(m_vertices.size(), 3);
    auto* V_3_data = V_3.data();
    for (int r = 0; r < V_3.rows(); ++r)
    {
        auto* v3rowPtr = &V_3_data[3*r];
        v3rowPtr[0] = m_vertices[r].x; // V_3(r, 0)
        v3rowPtr[1] = m_vertices[r].y; // V_3(r, 1)
        v3rowPtr[2] = m_vertices[r].z; // V_3(r, 2)
    }
    // Eigen::MatrixXd NV;
    Eigen::Matrix<double, Eigen::Dynamic, 3, Eigen::RowMajor> NV;
    if (m_meshType==MESH_TYPE_SMPL)
    {
        std::string errorMessage("Not supporting MESH_TYPE_SMPL currently");
        throw std::runtime_error(errorMessage);
        // igl::per_vertex_normals(V_3, g_smpl.faces_, NV);
    }
    if (m_meshType == MESH_TYPE_TOTAL || m_meshType == MESH_TYPE_ADAM)
    {
        // Uses the optimized local per_vertex_normals above instead of igl's.
        // igl::per_vertex_normals(V_3, model.m_faces, NV);
        per_vertex_normals(V_3, model.m_faces, NV);
        // Eigen::Matrix<double, Eigen::Dynamic, 3, Eigen::RowMajor> NVAux;
        // igl::per_vertex_normals(V_3, model.m_faces, NVAux);
        // std::cout << (NV - NVAux).norm() << std::endl;
        // assert((NV - NVAux).norm() < 1e-6);
    }
    // Copy the Eigen result back into the cv::Point3f member array.
    m_normals.resize(NV.rows());
    auto* NV_data = NV.data();
    for (int r = 0; r < NV.rows(); ++r)
    {
        const auto* const nvRow = &NV_data[3*r];
        m_normals[r] = cv::Point3f(nvRow[0], nvRow[1], nvRow[2]); // cv::Point3f(NV(r, 0), NV(r, 1), NV(r, 2))
    }
}

// Resets every per-mesh buffer; the instance can then be repopulated.
void CMeshModelInstance::clearMesh()
{
    m_face_vertexIndices.clear();
    m_vertices.clear();
    m_colors.clear();
    m_normals.clear();
    m_uvs.clear();
    m_joints.clear();
    m_joints_regress.clear();
    m_alpha.clear();
}

// Ordering predicate on (alpha, depth, face) tuples: larger depth first.
bool compareTupleDepth(const std::tuple<double, double, cv::Point3i>& a, const std::tuple<double, double, cv::Point3i>& b)
{
    return std::get<1>(a) > std::get<1>(b); // from far to near
}

// Ordering predicate on (alpha, depth, face) tuples: larger alpha first.
bool compareTupleAlpha(const std::tuple<double, double, cv::Point3i>& a, const std::tuple<double, double, cv::Point3i>& b)
{
    return std::get<0>(a) > std::get<0>(b); // opaque first, then transparent
}

// Reorders m_face_vertexIndices for back-to-front alpha rendering:
// fully opaque faces first (any order), then transparent faces sorted by
// descending view-space depth. `angleaxis` is the camera rotation applied
// to vertices before reading their z as depth.
void CMeshModelInstance::sortFaceDepth(const cv::Point3d angleaxis)
{
    // const auto start = std::chrono::high_resolution_clock::now();
    // Per-vertex depth after the (optional) camera rotation.
    std::vector<double> depth_vertex(m_vertices.size());
    if (angleaxis == cv::Point3d(0., 0., 0.)) // no rotation
    {
        for (auto i = 0u; i < m_vertices.size(); i++)
            depth_vertex[i] = m_vertices[i].z;
    }
    else
    {
        const double angle_axis[3] = {angleaxis.x, angleaxis.y, angleaxis.z};
        for (auto i = 0u; i < m_vertices.size(); i++)
        {
            const double pt[3] = {m_vertices[i].x, m_vertices[i].y, m_vertices[i].z};
            double result[3];
            ceres::AngleAxisRotatePoint(angle_axis, pt, result);
            // Only the rotated z component matters for depth sorting.
            depth_vertex[i] = result[2];
        }
    }
    assert(m_face_vertexIndices.size() % 3 == 0);
    // One (mean alpha, mean depth, corner indices) tuple per face.
    std::vector<std::tuple<double, double, cv::Point3i>> vecSort;
    const uint num_face = m_face_vertexIndices.size() / 3;
    vecSort.reserve(num_face);
    for (auto i = 0u; i < num_face; i++)
    {
        const uint I1 = m_face_vertexIndices[3 * i];
        const uint I2 = m_face_vertexIndices[3 * i + 1];
        const uint I3 = m_face_vertexIndices[3 * i + 2];
        const double depth = (depth_vertex[I1] + depth_vertex[I2] + depth_vertex[I3]) / 3;
        const double alpha = (m_alpha[I1] + m_alpha[I2] + m_alpha[I3]) / 3; // opaqueness
        vecSort.emplace_back(std::make_tuple(alpha, depth, cv::Point3i(I1, I2, I3)));
    }
    std::sort(vecSort.begin(), vecSort.end(), compareTupleAlpha); // first sort according to opacity, put opaque face first
    auto it = vecSort.begin();
    while(it != vecSort.end())
    {
        // find the first element that is not completely opaque
        if (std::get<0>(*it) != 1.0) break;
        it++;
    }
    std::sort(it, vecSort.end(), compareTupleDepth); // now sort according to depth, far first
    // Write the reordered faces back into the index buffer.
    for (auto i = 0u; i < num_face; i++)
    {
        auto& pt = std::get<2>(vecSort[i]);
        m_face_vertexIndices[3 * i + 0] = pt.x;
        m_face_vertexIndices[3 * i + 1] = pt.y;
        m_face_vertexIndices[3 * i + 2] = pt.z;
    }
    // const auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::high_resolution_clock::now() - start).count();
    // std::cout << "Sort depth duration " << duration * 1e-6 << " ms" << std::endl;
}
#include "core/TypeConstraint.h"
#include "core/Symbols.h"

using namespace std;

namespace sorbet::core {

bool TypeConstraint::isEmpty() const {
    return upperBounds.empty() && lowerBounds.empty();
}

// Seed the constraint domain for the given type parameters: covariant type
// arguments start from the bottom of the lattice (a lower bound), everything
// else starts from the top (an upper bound).
void TypeConstraint::defineDomain(Context ctx, const InlinedVector<SymbolRef, 4> &typeParams) {
    // ENFORCE(isEmpty()); // unfortunately this is false. See
    // test/testdata/infer/generic_methods/countraints_crosstalk.rb
    for (const auto &tp : typeParams) {
        ENFORCE(tp.data(ctx)->isTypeArgument());
        auto typ = cast_type<TypeVar>(tp.data(ctx)->resultType.get());
        ENFORCE(typ != nullptr);
        if (tp.data(ctx)->isCovariant()) {
            findLowerBound(typ->sym) = Types::bottom();
        } else {
            findUpperBound(typ->sym) = Types::top();
        }
    }
}

// Try to find a concrete instantiation for every type variable that satisfies
// all recorded bounds. Returns false (and latches cantSolve) if the bounds are
// contradictory; once solved, the result is cached via wasSolved.
bool TypeConstraint::solve(Context ctx) {
    if (cantSolve) {
        return false;
    }
    if (wasSolved) {
        return true;
    }

    // instantiate types to upper bound approximations
    for (auto &k : upperBounds) {
        auto &tv = k.first;
        auto &bound = k.second;
        if (bound == Types::top()) {
            continue;
        }
        auto approximation = bound->_approximate(ctx, *this);
        if (approximation) {
            findSolution(tv) = approximation;
        } else {
            ENFORCE(bound->isFullyDefined());
            findSolution(tv) = bound;
        }
    }

    // or lower bound approximation, if there is no upper bound
    for (auto &k : lowerBounds) {
        auto &tv = k.first;
        auto &bound = k.second;
        auto &sol = findSolution(tv);
        if (sol) {
            continue;
        }
        auto approximation = bound->_approximate(ctx, *this);
        if (approximation) {
            sol = approximation;
        } else {
            ENFORCE(bound->isFullyDefined());
            sol = bound;
        }
    }

    // Verify the chosen solutions actually sit inside their bounds.
    for (auto &k : upperBounds) {
        auto &tv = k.first;
        auto &upperBound = k.second;
        auto &sol = findSolution(tv);
        if (!sol) {
            sol = upperBound;
        }
        if (upperBound) {
            cantSolve = !Types::isSubType(ctx, findSolution(tv), upperBound);
            if (cantSolve) {
                return false;
            }
        }
    }

    for (auto &k : lowerBounds) {
        auto &tv = k.first;
        auto &lowerBound = k.second;
        cantSolve = !Types::isSubType(ctx, lowerBound, findSolution(tv));
        if (cantSolve) {
            return false;
        }
    }

    wasSolved = true;
    return true;
}

// Record the obligation t1 <: t2, where exactly one side is a TypeVar.
// Upper bounds accumulate as intersections; lower bounds accumulate as unions.
bool TypeConstraint::rememberIsSubtype(Context ctx, const TypePtr &t1, const TypePtr &t2) {
    ENFORCE(!wasSolved);
    if (auto t1p = cast_type<TypeVar>(t1.get())) {
        // t1 is the variable: t2 tightens its upper bound (intersection).
        auto &entry = findUpperBound(t1p->sym);
        if (!entry) {
            entry = t2;
        } else if (t2->isFullyDefined()) {
            entry = Types::all(ctx, entry, t2);
        } else {
            entry = AndType::make_shared(entry, t2);
        }
    } else {
        auto t2p = cast_type<TypeVar>(t2.get());
        ENFORCE(t2p != nullptr);
        // t2 is the variable: t1 widens its lower bound (union).
        auto &entry = findLowerBound(t2p->sym);
        if (!entry) {
            entry = t1;
        } else if (t1->isFullyDefined()) {
            entry = Types::any(ctx, entry, t1);
        } else {
            // Bug fix: lower bounds combine as a union, matching the
            // Types::any (lub) call in the fully-defined branch above. The
            // previous code built an AndType here, which *intersected* lower
            // bounds and over-constrained the type variable.
            entry = OrType::make_shared(entry, t1);
        }
    }
    return true;
}

// Check t1 <: t2 against the already-recorded bounds without mutating them.
// Exactly one side is expected to be a TypeVar; a missing bound is treated as
// the identity element for the comparison.
bool TypeConstraint::isAlreadyASubType(Context ctx, const TypePtr &t1, const TypePtr &t2) const {
    if (auto t1p = cast_type<TypeVar>(t1.get())) {
        if (!hasLowerBound(t1p->sym)) {
            return Types::isSubType(ctx, Types::top(), t2);
        }
        return Types::isSubType(ctx, findLowerBound(t1p->sym), t2);
    } else {
        auto t2p = cast_type<TypeVar>(t2.get());
        ENFORCE(t2p != nullptr);
        if (!hasUpperBound(t2p->sym)) {
            return Types::isSubType(ctx, t1, Types::bottom());
        }
        return Types::isSubType(ctx, t1, findUpperBound(t2p->sym));
    }
}

// Only valid after solve() succeeded.
TypePtr TypeConstraint::getInstantiation(SymbolRef sym) const {
    ENFORCE(wasSolved);
    return findSolution(sym);
}

// Copies bounds only; a solved constraint must not be copied.
shared_ptr<TypeConstraint> TypeConstraint::deepCopy() const {
    ENFORCE(!wasSolved);
    auto res = make_shared<TypeConstraint>();
    res->lowerBounds = this->lowerBounds;
    res->upperBounds = this->upperBounds;
    return res;
}

// An empty constraint that is permanently "solved" — used as a sentinel.
TypeConstraint TypeConstraint::makeEmptyFrozenConstraint() {
    TypeConstraint res;
    res.wasSolved = true;
    return res;
}

TypeConstraint TypeConstraint::EmptyFrozenConstraint(makeEmptyFrozenConstraint());

bool TypeConstraint::hasUpperBound(SymbolRef forWhat) const {
    for (auto &entry : this->upperBounds) {
        if (entry.first == forWhat) {
            return true;
        }
    }
    return false;
}

bool TypeConstraint::hasLowerBound(SymbolRef forWhat) const {
    for (auto &entry : this->lowerBounds) {
        if (entry.first == forWhat) {
            return true;
        }
    }
    return false;
}
// Mutable lookup: returns the upper-bound slot for `forWhat`, appending a
// fresh (empty) slot if none exists yet. Linear scan — the vectors are tiny.
TypePtr &TypeConstraint::findUpperBound(SymbolRef forWhat) {
    for (auto &entry : this->upperBounds) {
        if (entry.first == forWhat) {
            return entry.second;
        }
    }
    auto &inserted = this->upperBounds.emplace_back();
    inserted.first = forWhat;
    return inserted.second;
}

// Mutable lookup for the lower bound; same insert-on-miss semantics.
TypePtr &TypeConstraint::findLowerBound(SymbolRef forWhat) {
    for (auto &entry : this->lowerBounds) {
        if (entry.first == forWhat) {
            return entry.second;
        }
    }
    auto &inserted = this->lowerBounds.emplace_back();
    inserted.first = forWhat;
    return inserted.second;
}

// Mutable lookup for the solved instantiation; same insert-on-miss semantics.
TypePtr &TypeConstraint::findSolution(SymbolRef forWhat) {
    for (auto &entry : this->solution) {
        if (entry.first == forWhat) {
            return entry.second;
        }
    }
    auto &inserted = this->solution.emplace_back();
    inserted.first = forWhat;
    return inserted.second;
}

// Const lookup: the bound must already exist (callers check with
// hasUpperBound first); raises otherwise.
TypePtr TypeConstraint::findUpperBound(SymbolRef forWhat) const {
    for (auto &entry : this->upperBounds) {
        if (entry.first == forWhat) {
            return entry.second;
        }
    }
    Exception::raise("should never happen");
}

// Const lookup; raises if no lower bound was recorded for `forWhat`.
TypePtr TypeConstraint::findLowerBound(SymbolRef forWhat) const {
    for (auto &entry : this->lowerBounds) {
        if (entry.first == forWhat) {
            return entry.second;
        }
    }
    Exception::raise("should never happen");
}

// Const lookup; raises if `forWhat` has no recorded solution.
TypePtr TypeConstraint::findSolution(SymbolRef forWhat) const {
    for (auto &entry : this->solution) {
        if (entry.first == forWhat) {
            return entry.second;
        }
    }
    Exception::raise("should never happen");
}

// All type variables that received a solution; valid only once solved.
InlinedVector<SymbolRef, 4> TypeConstraint::getDomain() const {
    ENFORCE(isSolved());
    InlinedVector<SymbolRef, 4> ret;
    for (auto &entry : this->solution) {
        ret.emplace_back(entry.first);
    }
    return ret;
}

// Human-readable dump of bounds and solution, for debugging/tracing.
std::string TypeConstraint::toString(Context ctx) const {
    fmt::memory_buffer buf;
    fmt::format_to(buf, "upperBounds: [{}]\n",
                   fmt::map_join(
                       this->upperBounds.begin(), this->upperBounds.end(), ", ", [&ctx](auto pair) -> auto {
                           return fmt::format("{}: {}", pair.first.toString(ctx), pair.second->show(ctx));
                       }));
    fmt::format_to(buf, "lowerBounds: [{}]\n",
                   fmt::map_join(
                       this->lowerBounds.begin(), this->lowerBounds.end(), ", ", [&ctx](auto pair) -> auto {
                           return fmt::format("{}: {}", pair.first.toString(ctx), pair.second->show(ctx));
                       }));
    fmt::format_to(buf, "solution: [{}]\n",
                   fmt::map_join(
                       this->solution.begin(), this->solution.end(), ", ", [&ctx](auto pair) -> auto {
                           return fmt::format("{}: {}", pair.first.toString(ctx), pair.second->show(ctx));
                       }));
    return to_string(buf);
}

} // namespace sorbet::core
<<<<<<< HEAD /* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ ======= // Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. // >>>>>>> f41959ccb2... TensorFlow: Initial commit of TensorFlow library. // Decodes the blocks generated by block_builder.cc. #include "tensorflow/core/lib/io/block.h" <<<<<<< HEAD #include <algorithm> #include "tensorflow/core/lib/core/coding.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/io/format.h" #include "tensorflow/core/platform/logging.h" ======= #include <vector> #include <algorithm> #include "tensorflow/core/lib/io/format.h" #include "tensorflow/core/lib/core/coding.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/lib/core/errors.h" >>>>>>> f41959ccb2... TensorFlow: Initial commit of TensorFlow library. 
namespace tensorflow { namespace table { inline uint32 Block::NumRestarts() const { assert(size_ >= sizeof(uint32)); return core::DecodeFixed32(data_ + size_ - sizeof(uint32)); } Block::Block(const BlockContents& contents) : data_(contents.data.data()), size_(contents.data.size()), owned_(contents.heap_allocated) { if (size_ < sizeof(uint32)) { size_ = 0; // Error marker } else { size_t max_restarts_allowed = (size_ - sizeof(uint32)) / sizeof(uint32); if (NumRestarts() > max_restarts_allowed) { // The size is too small for NumRestarts() size_ = 0; } else { restart_offset_ = size_ - (1 + NumRestarts()) * sizeof(uint32); } } } Block::~Block() { if (owned_) { delete[] data_; } } // Helper routine: decode the next block entry starting at "p", // storing the number of shared key bytes, non_shared key bytes, // and the length of the value in "*shared", "*non_shared", and // "*value_length", respectively. Will not dereference past "limit". // // If any errors are detected, returns NULL. Otherwise, returns a // pointer to the key delta (just past the three decoded values). static inline const char* DecodeEntry(const char* p, const char* limit, uint32* shared, uint32* non_shared, uint32* value_length) { <<<<<<< HEAD if (limit - p < 3) return nullptr; ======= if (limit - p < 3) return NULL; >>>>>>> f41959ccb2... TensorFlow: Initial commit of TensorFlow library. 
*shared = reinterpret_cast<const unsigned char*>(p)[0]; *non_shared = reinterpret_cast<const unsigned char*>(p)[1]; *value_length = reinterpret_cast<const unsigned char*>(p)[2]; if ((*shared | *non_shared | *value_length) < 128) { // Fast path: all three values are encoded in one byte each p += 3; } else { <<<<<<< HEAD if ((p = core::GetVarint32Ptr(p, limit, shared)) == nullptr) return nullptr; if ((p = core::GetVarint32Ptr(p, limit, non_shared)) == nullptr) return nullptr; if ((p = core::GetVarint32Ptr(p, limit, value_length)) == nullptr) return nullptr; } if (static_cast<uint32>(limit - p) < (*non_shared + *value_length)) { return nullptr; ======= if ((p = core::GetVarint32Ptr(p, limit, shared)) == NULL) return NULL; if ((p = core::GetVarint32Ptr(p, limit, non_shared)) == NULL) return NULL; if ((p = core::GetVarint32Ptr(p, limit, value_length)) == NULL) return NULL; } if (static_cast<uint32>(limit - p) < (*non_shared + *value_length)) { return NULL; >>>>>>> f41959ccb2... TensorFlow: Initial commit of TensorFlow library. } return p; } class Block::Iter : public Iterator { private: const char* const data_; // underlying block contents uint32 const restarts_; // Offset of restart array (list of fixed32) uint32 const num_restarts_; // Number of uint32 entries in restart array // current_ is offset in data_ of current entry. >= restarts_ if !Valid uint32 current_; uint32 restart_index_; // Index of restart block in which current_ falls string key_; StringPiece value_; Status status_; inline int Compare(const StringPiece& a, const StringPiece& b) const { return a.compare(b); } // Return the offset in data_ just past the end of the current entry. 
inline uint32 NextEntryOffset() const { return (value_.data() + value_.size()) - data_; } uint32 GetRestartPoint(uint32 index) { assert(index < num_restarts_); return core::DecodeFixed32(data_ + restarts_ + index * sizeof(uint32)); } void SeekToRestartPoint(uint32 index) { key_.clear(); restart_index_ = index; // current_ will be fixed by ParseNextKey(); // ParseNextKey() starts at the end of value_, so set value_ accordingly uint32 offset = GetRestartPoint(index); value_ = StringPiece(data_ + offset, 0); } public: Iter(const char* data, uint32 restarts, uint32 num_restarts) : data_(data), restarts_(restarts), num_restarts_(num_restarts), current_(restarts_), restart_index_(num_restarts_) { assert(num_restarts_ > 0); } <<<<<<< HEAD bool Valid() const override { return current_ < restarts_; } Status status() const override { return status_; } StringPiece key() const override { assert(Valid()); return key_; } StringPiece value() const override { ======= virtual bool Valid() const { return current_ < restarts_; } virtual Status status() const { return status_; } virtual StringPiece key() const { assert(Valid()); return key_; } virtual StringPiece value() const { >>>>>>> f41959ccb2... TensorFlow: Initial commit of TensorFlow library. assert(Valid()); return value_; } <<<<<<< HEAD void Next() override { ======= virtual void Next() { >>>>>>> f41959ccb2... TensorFlow: Initial commit of TensorFlow library. assert(Valid()); ParseNextKey(); } <<<<<<< HEAD void Seek(const StringPiece& target) override { ======= virtual void Seek(const StringPiece& target) { >>>>>>> f41959ccb2... TensorFlow: Initial commit of TensorFlow library. 
// Binary search in restart array to find the last restart point // with a key < target uint32 left = 0; uint32 right = num_restarts_ - 1; while (left < right) { uint32 mid = (left + right + 1) / 2; uint32 region_offset = GetRestartPoint(mid); uint32 shared, non_shared, value_length; const char* key_ptr = DecodeEntry(data_ + region_offset, data_ + restarts_, &shared, &non_shared, &value_length); <<<<<<< HEAD if (key_ptr == nullptr || (shared != 0)) { ======= if (key_ptr == NULL || (shared != 0)) { >>>>>>> f41959ccb2... TensorFlow: Initial commit of TensorFlow library. CorruptionError(); return; } StringPiece mid_key(key_ptr, non_shared); if (Compare(mid_key, target) < 0) { // Key at "mid" is smaller than "target". Therefore all // blocks before "mid" are uninteresting. left = mid; } else { // Key at "mid" is >= "target". Therefore all blocks at or // after "mid" are uninteresting. right = mid - 1; } } // Linear search (within restart block) for first key >= target SeekToRestartPoint(left); while (true) { if (!ParseNextKey()) { return; } if (Compare(key_, target) >= 0) { return; } } } <<<<<<< HEAD void SeekToFirst() override { ======= virtual void SeekToFirst() { >>>>>>> f41959ccb2... TensorFlow: Initial commit of TensorFlow library. SeekToRestartPoint(0); ParseNextKey(); } private: void CorruptionError() { current_ = restarts_; restart_index_ = num_restarts_; status_ = errors::DataLoss("bad entry in block"); key_.clear(); <<<<<<< HEAD value_ = StringPiece(); ======= value_.clear(); >>>>>>> f41959ccb2... TensorFlow: Initial commit of TensorFlow library. } bool ParseNextKey() { current_ = NextEntryOffset(); const char* p = data_ + current_; const char* limit = data_ + restarts_; // Restarts come right after data if (p >= limit) { // No more entries to return. Mark as invalid. 
current_ = restarts_; restart_index_ = num_restarts_; return false; } // Decode next entry uint32 shared, non_shared, value_length; p = DecodeEntry(p, limit, &shared, &non_shared, &value_length); <<<<<<< HEAD if (p == nullptr || key_.size() < shared) { ======= if (p == NULL || key_.size() < shared) { >>>>>>> f41959ccb2... TensorFlow: Initial commit of TensorFlow library. CorruptionError(); return false; } else { key_.resize(shared); key_.append(p, non_shared); value_ = StringPiece(p + non_shared, value_length); while (restart_index_ + 1 < num_restarts_ && GetRestartPoint(restart_index_ + 1) < current_) { ++restart_index_; } return true; } } }; Iterator* Block::NewIterator() { if (size_ < sizeof(uint32)) { return NewErrorIterator(errors::DataLoss("bad block contents")); } const uint32 num_restarts = NumRestarts(); if (num_restarts == 0) { return NewEmptyIterator(); } else { return new Iter(data_, restart_offset_, num_restarts); } } } // namespace table } // namespace tensorflow
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

// <iterator>

// front_insert_iterator

// Test nested types and data member:

// template <class Container>
// class front_insert_iterator
//  : public iterator<output_iterator_tag, void, void, void, void> // until C++17
// {
// protected:
//   Container* container;
// public:
//   typedef Container                   container_type;
//   typedef void                        value_type;
//   typedef void                        difference_type; // until C++20
//   typedef ptrdiff_t                   difference_type; // since C++20
//   typedef void                        reference;
//   typedef void                        pointer;
//   typedef output_iterator_tag         iterator_category;
// };

#include <iterator>
#include <type_traits>
#include <vector>

#include "test_macros.h"

// Derives privately from front_insert_iterator purely so test() can assign to
// the protected `container` member: this compiles only if the member exists
// with the required name and protected accessibility.
template <class C>
struct find_container
    : private std::front_insert_iterator<C>
{
    explicit find_container(C& c) : std::front_insert_iterator<C>(c) {}
    void test() {this->container = 0;}
};

// Verifies all nested typedefs mandated for front_insert_iterator<C>, with
// the difference_type switching from void to ptrdiff_t in C++20.
template <class C>
void test()
{
    typedef std::front_insert_iterator<C> R;
    C c;
    find_container<C> q(c);
    q.test();
    static_assert((std::is_same<typename R::container_type, C>::value), "");
    static_assert((std::is_same<typename R::value_type, void>::value), "");
#if TEST_STD_VER > 17
    static_assert((std::is_same<typename R::difference_type, std::ptrdiff_t>::value), "");
#else
    static_assert((std::is_same<typename R::difference_type, void>::value), "");
#endif
    static_assert((std::is_same<typename R::reference, void>::value), "");
    static_assert((std::is_same<typename R::pointer, void>::value), "");
    static_assert((std::is_same<typename R::iterator_category, std::output_iterator_tag>::value), "");
#if TEST_STD_VER <= 14
    // The std::iterator base class only exists (non-deprecated) through C++14.
    typedef std::iterator<std::output_iterator_tag, void, void, void, void> iterator_base;
    static_assert((std::is_base_of<iterator_base, R>::value), "");
#endif
}

int main(int, char**)
{
    test<std::vector<int> >();

    return 0;
}
/* Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include <memory>
#include <vector>

#include "../qsim/lib/channel.h"
#include "../qsim/lib/channels_cirq.h"
#include "../qsim/lib/circuit.h"
#include "../qsim/lib/circuit_noisy.h"
#include "../qsim/lib/fuser_mqubit.h"
#include "../qsim/lib/gate_appl.h"
#include "../qsim/lib/gates_cirq.h"
#include "../qsim/lib/io.h"
#include "../qsim/lib/qtrajectory.h"
#include "../qsim/lib/seqfor.h"
#include "../qsim/lib/simmux.h"
#include "cirq/google/api/v2/program.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/lib/core/error_codes.pb.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow_quantum/core/ops/parse_context.h"
#include "tensorflow_quantum/core/proto/pauli_sum.pb.h"
#include "tensorflow_quantum/core/src/util_qsim.h"

namespace tfq {

using ::cirq::google::api::v2::Program;
using ::tensorflow::Status;
using ::tfq::proto::PauliSum;

typedef qsim::Cirq::GateCirq<float> QsimGate;
typedef qsim::Circuit<QsimGate> QsimCircuit;
typedef qsim::NoisyCircuit<QsimGate> NoisyQsimCircuit;

// Op kernel that estimates Pauli-sum expectation values of noisy circuits by
// Monte-Carlo trajectory simulation (qsim quantum-trajectory simulator).
// Inputs: programs, symbol_names, symbol_values, pauli_sums, num_samples.
// Output: [batch_size, n_ops] float matrix of sampled expectation values.
class TfqNoisyExpectationOp : public tensorflow::OpKernel {
 public:
  explicit TfqNoisyExpectationOp(tensorflow::OpKernelConstruction* context)
      : OpKernel(context) {}

  void Compute(tensorflow::OpKernelContext* context) override {
    // TODO (mbbrough): add more dimension checks for other inputs here.
    const int num_inputs = context->num_inputs();
    OP_REQUIRES(context, num_inputs == 5,
                tensorflow::errors::InvalidArgument(absl::StrCat(
                    "Expected 5 inputs, got ", num_inputs, " inputs.")));

    // Create the output Tensor: one row per circuit, one column per PauliSum.
    const int output_dim_batch_size = context->input(0).dim_size(0);
    const int output_dim_op_size = context->input(3).dim_size(1);
    tensorflow::TensorShape output_shape;
    output_shape.AddDim(output_dim_batch_size);
    output_shape.AddDim(output_dim_op_size);
    tensorflow::Tensor* output = nullptr;
    OP_REQUIRES_OK(context,
                   context->allocate_output(0, output_shape, &output));
    auto output_tensor = output->matrix<float>();

    // Parse circuits, qubit counts and observables from the input tensors.
    std::vector<Program> programs;
    std::vector<int> num_qubits;
    std::vector<std::vector<PauliSum>> pauli_sums;
    OP_REQUIRES_OK(context, GetProgramsAndNumQubits(context, &programs,
                                                    &num_qubits, &pauli_sums));

    std::vector<SymbolMap> maps;
    OP_REQUIRES_OK(context, GetSymbolMaps(context, &maps));

    OP_REQUIRES(context, programs.size() == maps.size(),
                tensorflow::errors::InvalidArgument(absl::StrCat(
                    "Number of circuits and symbol_values do not match. Got ",
                    programs.size(), " circuits and ", maps.size(),
                    " symbol values.")));

    std::vector<std::vector<int>> num_samples;
    OP_REQUIRES_OK(context, GetNumSamples(context, &num_samples));

    OP_REQUIRES(context, num_samples.size() == pauli_sums.size(),
                tensorflow::errors::InvalidArgument(absl::StrCat(
                    "Dimension 0 of num_samples and pauli_sums do not match.",
                    "Got ", num_samples.size(), " lists of sample sizes and ",
                    pauli_sums.size(), " lists of pauli sums.")));

    OP_REQUIRES(
        context, context->input(4).dim_size(1) == context->input(3).dim_size(1),
        tensorflow::errors::InvalidArgument(absl::StrCat(
            "Dimension 1 of num_samples and pauli_sums do not match.", "Got ",
            context->input(4).dim_size(1), " lists of sample sizes and ",
            context->input(3).dim_size(1), " lists of pauli sums.")));

    // Construct qsim circuits, parallelized over the batch; the first parse
    // failure (if any) is captured into parse_status under p_lock.
    std::vector<NoisyQsimCircuit> qsim_circuits(programs.size(),
                                                NoisyQsimCircuit());

    Status parse_status = Status::OK();
    auto p_lock = tensorflow::mutex();
    auto construct_f = [&](int start, int end) {
      for (int i = start; i < end; i++) {
        Status local = NoisyQsimCircuitFromProgram(
            programs[i], maps[i], num_qubits[i], false, &qsim_circuits[i]);
        NESTED_FN_STATUS_SYNC(parse_status, local, p_lock);
      }
    };

    // Estimated cost (in cycles) per circuit for the thread-pool scheduler.
    const int num_cycles = 1000;
    context->device()->tensorflow_cpu_worker_threads()->workers->ParallelFor(
        programs.size(), num_cycles, construct_f);
    OP_REQUIRES_OK(context, parse_status);

    int max_num_qubits = 0;
    for (const int num : num_qubits) {
      max_num_qubits = std::max(max_num_qubits, num);
    }

    // Cross reference with standard google cloud compute instances
    // Memory ~= 2 * num_threads * (2 * 64 * 2 ** num_qubits in circuits)
    // e2s2 = 2 CPU, 8GB -> Can safely do 25 since Memory = 4GB
    // e2s4 = 4 CPU, 16GB -> Can safely do 25 since Memory = 8GB
    // ...
    if (max_num_qubits >= 26) {
      // With 26 or more qubits we switch to an alternate parallelization
      // scheme with runtime:
      // O(n_circuits * max_j(num_samples[i])) with parallelization being
      // multiple threads per wavefunction.
      ComputeLarge(num_qubits, qsim_circuits, pauli_sums, num_samples, context,
                   &output_tensor);
    } else {
      // Runtime: O(n_circuits * max_j(num_samples[i])) with parallelization
      // being done over number of trials.
      ComputeSmall(num_qubits, max_num_qubits, qsim_circuits, pauli_sums,
                   num_samples, context, &output_tensor);
    }
  }

 private:
  // Large-circuit path: circuits are simulated one at a time and the
  // thread pool parallelizes *inside* each state-vector operation.
  void ComputeLarge(const std::vector<int>& num_qubits,
                    const std::vector<NoisyQsimCircuit>& ncircuits,
                    const std::vector<std::vector<PauliSum>>& pauli_sums,
                    const std::vector<std::vector<int>>& num_samples,
                    tensorflow::OpKernelContext* context,
                    tensorflow::TTypes<float, 1>::Matrix* output_tensor) {
    // Instantiate qsim objects.
    const auto tfq_for = tfq::QsimFor(context);
    using Simulator = qsim::Simulator<const tfq::QsimFor&>;
    using StateSpace = Simulator::StateSpace;
    using QTSimulator =
        qsim::QuantumTrajectorySimulator<qsim::IO, QsimGate,
                                         qsim::MultiQubitGateFuser, Simulator>;

    // Begin simulation.
    int largest_nq = 1;
    Simulator sim = Simulator(tfq_for);
    StateSpace ss = StateSpace(tfq_for);
    auto sv = ss.Create(largest_nq);
    auto scratch = ss.Create(largest_nq);

    // Simulate programs one by one. Parallelizing over state vectors
    // we no longer parallelize over circuits. Each time we encounter a
    // larger circuit we will grow the Statevector as necessary.
    for (int i = 0; i < ncircuits.size(); i++) {
      int nq = num_qubits[i];

      // (#679) Just ignore empty program: emit the -2.0 sentinel.
      if (ncircuits[i].channels.size() == 0) {
        for (int j = 0; j < pauli_sums[i].size(); j++) {
          (*output_tensor)(i, j) = -2.0;
        }
        continue;
      }

      if (nq > largest_nq) {
        largest_nq = nq;
        sv = ss.Create(largest_nq);
        scratch = ss.Create(largest_nq);
      }
      QTSimulator::Parameter param;
      param.collect_kop_stat = false;
      param.collect_mea_stat = false;
      param.normalize_before_mea_gates = true;
      std::vector<uint64_t> unused_stats;
      // Track op-wise stats: trajectories done and running sums per PauliSum.
      std::vector<int> run_samples(num_samples[i].size(), 0);
      std::vector<double> rolling_sums(num_samples[i].size(), 0.0);

      // One iteration = one noisy trajectory reused for every observable that
      // still needs samples.
      while (1) {
        ss.SetStateZero(sv);

        // time since epoch seeds random generator.
        unsigned long r_seed =
            std::chrono::duration_cast<std::chrono::milliseconds>(
                std::chrono::system_clock::now().time_since_epoch())
                .count();

        QTSimulator::RunOnce(param, ncircuits[i], r_seed, ss, sim, scratch, sv,
                             unused_stats);

        // Use this trajectory as a source for all expectation calculations.
        for (int j = 0; j < pauli_sums[i].size(); j++) {
          if (run_samples[j] >= num_samples[i][j]) {
            continue;
          }
          float exp_v = 0.0;
          OP_REQUIRES_OK(context,
                         ComputeExpectationQsim(pauli_sums[i][j], sim, ss, sv,
                                                scratch, &exp_v));
          rolling_sums[j] += static_cast<double>(exp_v);
          run_samples[j]++;
        }
        // Stop once every observable has its requested number of samples,
        // then convert the running sums into means.
        bool break_loop = true;
        for (int j = 0; j < num_samples[i].size(); j++) {
          if (run_samples[j] < num_samples[i][j]) {
            break_loop = false;
            break;
          }
        }
        if (break_loop) {
          for (int j = 0; j < num_samples[i].size(); j++) {
            rolling_sums[j] /= num_samples[i][j];
            (*output_tensor)(i, j) = static_cast<float>(rolling_sums[j]);
          }
          break;
        }
      }
    }
  }

  // Small-circuit path: every worker thread runs a sequential simulator over
  // the whole batch and contributes a share of each observable's
  // trajectories; partial means are accumulated into output_tensor under
  // per-row locks.
  void ComputeSmall(const std::vector<int>& num_qubits,
                    const int max_num_qubits,
                    const std::vector<NoisyQsimCircuit>& ncircuits,
                    const std::vector<std::vector<PauliSum>>& pauli_sums,
                    const std::vector<std::vector<int>>& num_samples,
                    tensorflow::OpKernelContext* context,
                    tensorflow::TTypes<float, 1>::Matrix* output_tensor) {
    using Simulator = qsim::Simulator<const qsim::SequentialFor&>;
    using StateSpace = Simulator::StateSpace;
    using QTSimulator =
        qsim::QuantumTrajectorySimulator<qsim::IO, QsimGate,
                                         qsim::MultiQubitGateFuser, Simulator>;

    const int output_dim_batch_size = output_tensor->dimension(0);
    // One mutex per batch row guards the += accumulation below.
    std::vector<tensorflow::mutex> batch_locks(output_dim_batch_size,
                                               tensorflow::mutex());

    const int num_threads = context->device()
                                ->tensorflow_cpu_worker_threads()
                                ->workers->NumThreads();

    // [num_threads, batch_size]: per-thread adjustments to the even
    // num_samples/num_threads split, computed by BalanceTrajectory.
    std::vector<std::vector<int>> rep_offsets(
        num_threads, std::vector<int>(output_dim_batch_size, 0));
    BalanceTrajectory(num_samples, num_threads, &rep_offsets);

    // Threads accumulate with +=, so the output must start at zero.
    output_tensor->setZero();

    Status compute_status = Status::OK();
    auto c_lock = tensorflow::mutex();
    auto DoWork = [&](int start, int end) {
      // Begin simulation.
      const auto tfq_for = qsim::SequentialFor(1);
      int largest_nq = 1;
      Simulator sim = Simulator(tfq_for);
      StateSpace ss = StateSpace(tfq_for);
      auto sv = ss.Create(largest_nq);
      auto scratch = ss.Create(largest_nq);

      for (int i = 0; i < ncircuits.size(); i++) {
        int nq = num_qubits[i];
        // With block_size = 1 each shard sees a unique `start`, which serves
        // as this thread's index into rep_offsets.
        int rep_offset = rep_offsets[start][i];

        // (#679) Just ignore empty program: emit the -2.0 sentinel.
        if (ncircuits[i].channels.size() == 0) {
          for (int j = 0; j < pauli_sums[i].size(); j++) {
            (*output_tensor)(i, j) = -2.0;
          }
          continue;
        }

        if (nq > largest_nq) {
          largest_nq = nq;
          sv = ss.Create(largest_nq);
          scratch = ss.Create(largest_nq);
        }
        QTSimulator::Parameter param;
        param.collect_kop_stat = false;
        param.collect_mea_stat = false;
        param.normalize_before_mea_gates = true;
        std::vector<uint64_t> unused_stats;
        // Track op-wise stats.
        std::vector<int> run_samples(num_samples[i].size(), 0);
        std::vector<double> rolling_sums(num_samples[i].size(), 0.0);

        while (1) {
          ss.SetStateZero(sv);

          // time since epoch seeds random generator.
          unsigned long r_seed =
              std::chrono::duration_cast<std::chrono::milliseconds>(
                  std::chrono::system_clock::now().time_since_epoch())
                  .count();

          QTSimulator::RunOnce(param, ncircuits[i], r_seed, ss, sim, scratch,
                               sv, unused_stats);

          // Compute expectations across all ops using this trajectory.
          for (int j = 0; j < pauli_sums[i].size(); j++) {
            // This thread's quota: ceil(num_samples / num_threads) plus its
            // balancing offset.
            int p_reps = (num_samples[i][j] + num_threads - 1) / num_threads;
            if (run_samples[j] >= p_reps + rep_offset) {
              continue;
            }
            float exp_v = 0.0;
            NESTED_FN_STATUS_SYNC(
                compute_status,
                ComputeExpectationQsim(pauli_sums[i][j], sim, ss, sv, scratch,
                                       &exp_v),
                c_lock);
            rolling_sums[j] += static_cast<double>(exp_v);
            run_samples[j]++;
          }

          // Check if we have run enough trajectories for all ops.
          bool break_loop = true;
          for (int j = 0; j < num_samples[i].size(); j++) {
            int p_reps = (num_samples[i][j] + num_threads - 1) / num_threads;
            if (run_samples[j] < p_reps + rep_offset) {
              break_loop = false;
              break;
            }
          }
          if (break_loop) {
            // Lock writing to this batch index in output_tensor.
            batch_locks[i].lock();
            for (int j = 0; j < num_samples[i].size(); j++) {
              rolling_sums[j] /= num_samples[i][j];
              (*output_tensor)(i, j) += static_cast<float>(rolling_sums[j]);
            }
            batch_locks[i].unlock();
            break;
          }
        }
      }
    };

    // block_size = 1.
    tensorflow::thread::ThreadPool::SchedulingParams scheduling_params(
        tensorflow::thread::ThreadPool::SchedulingStrategy::kFixedBlockSize,
        absl::nullopt, 1);
    context->device()->tensorflow_cpu_worker_threads()->workers->ParallelFor(
        num_threads, scheduling_params, DoWork);
    OP_REQUIRES_OK(context, compute_status);
  }
};

REGISTER_KERNEL_BUILDER(
    Name("TfqNoisyExpectation").Device(tensorflow::DEVICE_CPU),
    TfqNoisyExpectationOp);

REGISTER_OP("TfqNoisyExpectation")
    .Input("programs: string")
    .Input("symbol_names: string")
    .Input("symbol_values: float")
    .Input("pauli_sums: string")
    .Input("num_samples: int32")
    .Output("expectations: float")
    .SetShapeFn([](tensorflow::shape_inference::InferenceContext* c) {
      tensorflow::shape_inference::ShapeHandle programs_shape;
      TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &programs_shape));

      tensorflow::shape_inference::ShapeHandle symbol_names_shape;
      TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &symbol_names_shape));

      tensorflow::shape_inference::ShapeHandle symbol_values_shape;
      TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 2, &symbol_values_shape));

      tensorflow::shape_inference::ShapeHandle pauli_sums_shape;
      TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 2, &pauli_sums_shape));

      tensorflow::shape_inference::ShapeHandle num_samples_shape;
      TF_RETURN_IF_ERROR(c->WithRank(c->input(4), 2, &num_samples_shape));

      tensorflow::shape_inference::DimensionHandle output_rows =
          c->Dim(programs_shape, 0);
      tensorflow::shape_inference::DimensionHandle output_cols =
          c->Dim(pauli_sums_shape, 1);
      c->set_output(0, c->Matrix(output_rows, output_cols));

      return tensorflow::Status::OK();
    });

}  // namespace tfq
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <future> // NOLINT #include <ostream> #include "paddle/fluid/framework/data_type.h" #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/detail/macros.h" #include "paddle/fluid/operators/send_recv_util.h" namespace paddle { namespace operators { class PrefetchOp : public framework::OperatorBase { public: PrefetchOp(const std::string& type, const framework::VariableNameMap& inputs, const framework::VariableNameMap& outputs, const framework::AttributeMap& attrs) : OperatorBase(type, inputs, outputs, attrs) {} void RunImpl(const framework::Scope& scope, const platform::Place& place) const override { auto ins = Inputs("X"); auto outs = Outputs("Out"); std::vector<std::string> epmap = Attr<std::vector<std::string>>("epmap"); platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance(); auto& ctx = *pool.Get(place); detail::RPCClient* rpc_client = detail::RPCClient::GetInstance<RPCCLIENT_T>(); for (size_t i = 0; i < ins.size(); i++) { if (NeedSend(scope, ins[i])) { VLOG(3) << "sending " << ins[i] << " to " << epmap[i] << " to get " << outs[i] << " back"; rpc_client->AsyncPrefetchVar(epmap[i], ctx, scope, ins[i], outs[i]); } else { VLOG(3) << "don't send no-initialied variable: " << ins[i]; } } rpc_client->Wait(); } }; class PrefetchOpMaker : public framework::OpProtoAndCheckerMaker { public: void 
Make() { AddInput("X", "(LoDTensor) Input Id variables to be sent").AsDuplicable(); AddOutput("Out", "(LoDTensor) result " "to be fetched from parameter server") .AsDuplicable(); AddAttr<std::vector<std::string>>( "epmap", "(string vector, default 127.0.0.1:6164)" "Server endpoints in the order of input variables for mapping") .SetDefault({"127.0.0.1:6164"}); AddComment(R"DOC( Prefetch operator This operator will send Ids variables to listen_and_serve op at the parameter server and fetch result back. )DOC"); } }; class PrefetchOpShapeInference : public framework::InferShapeBase { public: void operator()(framework::InferShapeContext* ctx) const override {} }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OPERATOR(prefetch, ops::PrefetchOp, paddle::framework::EmptyGradOpMaker, ops::PrefetchOpMaker, ops::PrefetchOpShapeInference);
// Copyright (C) 2014-2017 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) // This Source Code Form is subject to the terms of the Mozilla Public // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef VSOMEIP_NETLINK_CONNECTOR_HPP #define VSOMEIP_NETLINK_CONNECTOR_HPP #ifndef _WIN32 #include <sys/socket.h> #include <linux/netlink.h> #include <linux/rtnetlink.h> #include <map> #include <mutex> #include <boost/asio/ip/address.hpp> #include <boost/asio/basic_raw_socket.hpp> #include "../../endpoints/include/buffer.hpp" namespace vsomeip { template <typename Protocol> class nl_endpoint { public: /// The protocol type associated with the endpoint. typedef Protocol protocol_type; typedef boost::asio::detail::socket_addr_type data_type; /// Default constructor. nl_endpoint() { sockaddr.nl_family = PF_NETLINK; sockaddr.nl_groups = 0; sockaddr.nl_pid = getpid(); } /// Construct an endpoint using the specified path name. nl_endpoint(int group, int pid=getpid()) { sockaddr.nl_family = PF_NETLINK; sockaddr.nl_groups = group; sockaddr.nl_pid = pid; } /// Copy constructor. nl_endpoint(const nl_endpoint& other) { sockaddr = other.sockaddr; } /// Assign from another endpoint. nl_endpoint& operator=(const nl_endpoint& other) { sockaddr = other.sockaddr; return *this; } /// The protocol associated with the endpoint. protocol_type protocol() const { return protocol_type(); } /// Get the underlying endpoint in the native type. data_type* data() { return &sockaddr; } /// Get the underlying endpoint in the native type. const data_type* data() const { return reinterpret_cast<const struct sockaddr*>(&sockaddr); } /// Get the underlying size of the endpoint in the native type. std::size_t size() const { return sizeof(sockaddr); } /// Set the underlying size of the endpoint in the native type. 
void resize(std::size_t size) { /* nothing we can do here */ } /// Get the capacity of the endpoint in the native type. std::size_t capacity() const { return sizeof(sockaddr); } private: sockaddr_nl sockaddr; }; class nl_protocol { public: nl_protocol() { proto = 0; } nl_protocol(int proto) { this->proto = proto; } /// Obtain an identifier for the type of the protocol. int type() const { return SOCK_RAW; } /// Obtain an identifier for the protocol. int protocol() const { return proto; } /// Obtain an identifier for the protocol family. int family() const { return PF_NETLINK; } typedef nl_endpoint<nl_protocol> endpoint; typedef boost::asio::basic_raw_socket<nl_protocol> socket; private: int proto; }; typedef std::function< void (bool, std::string, bool) > net_if_changed_handler_t; class netlink_connector : public std::enable_shared_from_this<netlink_connector> { public: netlink_connector(boost::asio::io_service& _io, boost::asio::ip::address _address, boost::asio::ip::address _multicast_address): net_if_index_for_address_(0), handler_(nullptr), socket_(_io), recv_buffer_(recv_buffer_size, 0), address_(_address), multicast_address_(_multicast_address) { } ~netlink_connector() {} void register_net_if_changes_handler(net_if_changed_handler_t _handler); void unregister_net_if_changes_handler(); void start(); void stop(); private: bool has_address(struct ifaddrmsg * ifa_struct, size_t length, const unsigned int address); void send_ifa_request(); void send_ifi_request(); void send_rt_request(); void receive_cbk(boost::system::error_code const &_error, std::size_t _bytes); void send_cbk(boost::system::error_code const &_error, std::size_t _bytes); bool check_sd_multicast_route_match(struct rtmsg* _routemsg, size_t _length, std::string* _routename) const; std::map<int, unsigned int> net_if_flags_; int net_if_index_for_address_; net_if_changed_handler_t handler_; std::mutex socket_mutex_; boost::asio::basic_raw_socket<nl_protocol> socket_; const size_t recv_buffer_size = 
16384; message_buffer_t recv_buffer_; boost::asio::ip::address address_; boost::asio::ip::address multicast_address_; }; } #endif // NOT _WIN32 #endif // VSOMEIP_NETLINK_CONNECTOR_HPP
#pragma once #include <array> #include <pluginterfaces/base/ipluginbase.h> NS_HWM_BEGIN class ClassInfo2Data { public: ClassInfo2Data(Steinberg::PClassInfo2 const &info); ClassInfo2Data(Steinberg::PClassInfoW const &info); String const & GetSubCategories() const { return sub_categories_; } String const & GetVendor() const { return vendor_; } String const & GetVersion() const { return version_; } String const & GetSDKVersion() const { return sdk_version_; } bool HasSubCategory(String elem) const; private: String sub_categories_; String vendor_; String version_; String sdk_version_; }; class ClassInfo { public: static constexpr UInt32 kCIDLength = 16; using CID = std::array<Steinberg::int8, kCIDLength>; ClassInfo(); ClassInfo(Steinberg::PClassInfo const &info); ClassInfo(Steinberg::PClassInfo2 const &info); ClassInfo(Steinberg::PClassInfoW const &info); CID const & GetCID() const { return cid_; } String const & GetName() const { return name_; } String const & GetCategory() const { return category_; } Steinberg::int32 GetCardinality() const { return cardinality_; } bool HasClassInfo2() const { return static_cast<bool>(classinfo2_data_); } ClassInfo2Data const & GetClassInfo2() const { return *classinfo2_data_; } bool IsEffect() const; bool IsInstrument() const; private: CID cid_ = {{}}; String name_; String category_; Steinberg::int32 cardinality_ = -1; std::optional<ClassInfo2Data> classinfo2_data_; }; NS_HWM_END
/* ************************************************************************
 * Copyright (c) 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * ************************************************************************ */

#include "preconditioner_multicolored.hpp"
#include "../../utils/def.hpp"
#include "../solver.hpp"
#include "preconditioner.hpp"

#include "../../base/local_matrix.hpp"

#include "../../base/local_vector.hpp"

#include "../../utils/allocate_free.hpp"
#include "../../utils/log.hpp"

#include <complex>

namespace rocalution {

// Multi-colored preconditioner base: colors the operator, permutes it into
// color blocks and (optionally) decomposes it into per-block sub-matrices
// with Jacobi diagonal solvers.
template <class OperatorType, class VectorType, typename ValueType>
MultiColored<OperatorType, VectorType, ValueType>::MultiColored()
{
    log_debug(this, "MultiColored::MultiColored()", "default constructor");

    this->num_blocks_  = 0;
    this->block_sizes_ = NULL;

    this->preconditioner_ = NULL;
    this->analyzer_op_    = NULL;

    this->op_mat_format_      = false;
    this->precond_mat_format_ = CSR;

    this->decomp_ = true;
}

template <class OperatorType, class VectorType, typename ValueType>
MultiColored<OperatorType, VectorType, ValueType>::~MultiColored()
{
    log_debug(this, "MultiColored::~MultiColored()", "destructor");

    this->Clear();
}

// Releases all owned block matrices, vectors and solvers and resets the
// preconditioner to its default-constructed state.
template <class OperatorType, class VectorType, typename ValueType>
void MultiColored<OperatorType, VectorType, ValueType>::Clear(void)
{
    log_debug(this, "MultiColored::Clear()", this->build_);

    if(this->build_ == true)
    {
        delete this->preconditioner_;
        this->preconditioner_ = NULL;

        if(this->decomp_ == true)
        {
            // Per-block resources only exist when decomposition was built.
            for(int i = 0; i < this->num_blocks_; ++i)
            {
                this->x_block_[i]->Clear();
                delete this->x_block_[i];

                this->diag_block_[i]->Clear();
                delete this->diag_block_[i];

                this->diag_solver_[i]->Clear();
                delete this->diag_solver_[i];

                for(int j = 0; j < this->num_blocks_; ++j)
                {
                    delete this->preconditioner_block_[i][j];
                }

                delete[] this->preconditioner_block_[i];
            }

            delete[] this->x_block_;
            delete[] this->diag_block_;
            delete[] this->diag_solver_;
            delete[] this->preconditioner_block_;
        }

        // analyzer_op_ is owned only when it differs from op_.
        if(this->analyzer_op_ != this->op_)
        {
            delete this->analyzer_op_;
        }

        this->analyzer_op_ = NULL;

        this->x_.Clear();
        this->permutation_.Clear();

        free_host(&this->block_sizes_);
        this->num_blocks_ = 0;

        this->diag_.Clear();

        this->op_mat_format_      = false;
        this->precond_mat_format_ = CSR;

        this->decomp_ = true;

        this->build_ = false;
    }
}

// Requests a specific matrix format for the preconditioner blocks.
template <class OperatorType, class VectorType, typename ValueType>
void MultiColored<OperatorType, VectorType, ValueType>::SetPrecondMatrixFormat(
    unsigned int mat_format)
{
    log_debug(this, "MultiColored::SetPrecondMatrixFormat()", mat_format);

    this->op_mat_format_      = true;
    this->precond_mat_format_ = mat_format;
}

// Enables/disables block decomposition (diagonal-only mode when disabled).
template <class OperatorType, class VectorType, typename ValueType>
void MultiColored<OperatorType, VectorType, ValueType>::SetDecomposition(bool decomp)
{
    log_debug(this, "MultiColored::SetDecomposition()", decomp);

    this->decomp_ = decomp;
}

// Clones the operator into preconditioner_ and prepares the permutation
// vector on the operator's backend.
template <class OperatorType, class VectorType, typename ValueType>
void MultiColored<OperatorType, VectorType, ValueType>::Build_Analyser_(void)
{
    log_debug(this, "MultiColored::Build_Analyser_()");

    assert(this->op_ != NULL);
    this->analyzer_op_ = NULL;

    this->preconditioner_ = new OperatorType;
    this->preconditioner_->CloneFrom(*this->op_);

    this->permutation_.CloneBackend(*this->op_);
}

// Runs multi-coloring on the analyzer operator (if set) or on op_ itself,
// producing num_blocks_, block_sizes_ and the permutation.
template <class OperatorType, class VectorType, typename ValueType>
void MultiColored<OperatorType, VectorType, ValueType>::Analyse_(void)
{
    log_debug(this, "MultiColored::Analyse_()");

    if(this->analyzer_op_ != NULL)
    {
        // use extra matrix
        this->analyzer_op_->MultiColoring(
            this->num_blocks_, &this->block_sizes_, &this->permutation_);
    }
    else
    {
        // op_ matrix
        this->op_->MultiColoring(this->num_blocks_, &this->block_sizes_, &this->permutation_);
    }
}

// Applies the coloring permutation to the cloned operator.
template <class OperatorType, class VectorType, typename ValueType>
void MultiColored<OperatorType, VectorType, ValueType>::Permute_(void)
{
    log_debug(this, "MultiColored::Permute_()");

    assert(this->permutation_.GetSize() > 0);

    this->preconditioner_->Permute(this->permutation_);
}

// Hook for derived classes; nothing to factorize in the base class.
template <class OperatorType, class VectorType, typename ValueType>
void MultiColored<OperatorType, VectorType, ValueType>::Factorize_(void)
{
}

// Extracts the per-color sub-matrix blocks, their diagonals and per-block
// Jacobi solvers; in non-decomposition mode only the global diagonal is kept.
template <class OperatorType, class VectorType, typename ValueType>
void MultiColored<OperatorType, VectorType, ValueType>::Decompose_(void)
{
    // FIX: log message previously read " * beging".
    log_debug(this, "MultiColored::Decompose_()", " * begin");

    if(this->decomp_ == true)
    {
        assert(this->num_blocks_ > 0);
        assert(this->block_sizes_ != NULL);

        // Build exclusive prefix-sum offsets of the block sizes.
        int* offsets = NULL;
        allocate_host(this->num_blocks_ + 1, &offsets);

        offsets[0] = 0;
        for(int i = 0; i < this->num_blocks_; ++i)
        {
            offsets[i + 1] = this->block_sizes_[i];
        }

        // sum up
        for(int i = 0; i < this->num_blocks_; ++i)
        {
            offsets[i + 1] += offsets[i];
        }

        this->diag_solver_ = new Solver<OperatorType, VectorType, ValueType>*[this->num_blocks_];

        this->preconditioner_block_ = new OperatorType**[this->num_blocks_];
        for(int i = 0; i < this->num_blocks_; ++i)
        {
            this->preconditioner_block_[i] = new OperatorType*[this->num_blocks_];
        }

        this->x_block_    = new VectorType*[this->num_blocks_];
        this->diag_block_ = new VectorType*[this->num_blocks_];

        for(int i = 0; i < this->num_blocks_; ++i)
        {
            for(int j = 0; j < this->num_blocks_; ++j)
            {
                this->preconditioner_block_[i][j] = new LocalMatrix<ValueType>;
                this->preconditioner_block_[i][j]->CloneBackend(*this->op_);
            }
        }

        this->preconditioner_->ExtractSubMatrices(this->num_blocks_,
                                                  this->num_blocks_,
                                                  offsets,
                                                  offsets,
                                                  this->preconditioner_block_);

        free_host(&offsets);

        // Removed a dead local offset accumulator that was incremented but
        // never read in this loop.
        for(int i = 0; i < this->num_blocks_; ++i)
        {
            this->diag_block_[i] = new VectorType;
            this->diag_block_[i]->CloneBackend(*this->op_); // clone backend
            this->diag_block_[i]->Allocate("Diagonal preconditioners blocks",
                                           this->block_sizes_[i]);

            this->preconditioner_block_[i][i]->ExtractDiagonal(this->diag_block_[i]);

            this->x_block_[i] = new VectorType; // empty vector
            this->x_block_[i]->CloneBackend(*this->op_); // clone backend
            this->x_block_[i]->Allocate("MultiColored Preconditioner x_block_",
                                        this->block_sizes_[i]);

            Jacobi<OperatorType, VectorType, ValueType>* jacobi =
                new Jacobi<OperatorType, VectorType, ValueType>;
            jacobi->SetOperator(*this->preconditioner_block_[i][i]);
            jacobi->Build();

            this->diag_solver_[i] = jacobi;

            // The diagonal block itself is no longer needed once the
            // Jacobi solver has been built from it.
            this->preconditioner_block_[i][i]->Clear();
        }

        // Clone the format
        // e.g. the preconditioner block matrices will have the same format as this->op_
        if(this->op_mat_format_ == true)
        {
            for(int i = 0; i < this->num_blocks_; ++i)
            {
                for(int j = 0; j < this->num_blocks_; ++j)
                {
                    this->preconditioner_block_[i][j]->ConvertTo(this->precond_mat_format_);
                }
            }
        }
    }
    else
    {
        this->diag_.CloneBackend(*this->op_);
        this->preconditioner_->ExtractDiagonal(&this->diag_);
    }

    this->x_.CloneBackend(*this->op_);
    this->x_.Allocate("Permuted solution vector", this->op_->GetM());

    log_debug(this, "MultiColored::Decompose_()", " * end");
}

// Full build pipeline: analyse (coloring), permute, factorize, decompose.
template <class OperatorType, class VectorType, typename ValueType>
void MultiColored<OperatorType, VectorType, ValueType>::Build(void)
{
    log_debug(this, "MultiColored::Build()", this->build_, " #*# begin");

    assert(this->build_ == false);
    assert(this->op_ != NULL);

    this->Build_Analyser_();
    this->Analyse_();

    if((this->analyzer_op_ != this->op_) && (this->analyzer_op_ != NULL))
    {
        this->analyzer_op_->Clear();
    }

    this->Permute_();
    this->Factorize_();
    this->Decompose_();

    // TODO check for correctness
    //    this->op_->WriteFileMTX("op.mtx");
    //    this->preconditioner_->WriteFileMTX("precond.mtx");

    this->build_ = true;

    if(this->decomp_ == true)
    {
        // The permuted clone is fully decomposed into blocks; drop it.
        this->preconditioner_->Clear();
    }
    else
    {
        this->PostAnalyse_();
    }

    log_debug(this, "MultiColored::Build()", this->build_, " #*# end");
}

// Hook for derived classes; no post-analysis in the base class.
template <class OperatorType, class VectorType, typename ValueType>
void MultiColored<OperatorType, VectorType, ValueType>::PostAnalyse_(void)
{
}

// Applies the preconditioner: either via the block decomposition
// (L/D/R sweeps) or directly on the permuted operator.
template <class OperatorType, class VectorType, typename ValueType>
void MultiColored<OperatorType, VectorType, ValueType>::Solve(const VectorType& rhs,
                                                              VectorType* x)
{
    log_debug(this, "MultiColored::Solve()", " #*# begin", (const void*&)rhs, x);

    assert(x != NULL);
    assert(x != &rhs);
    assert(this->build_ == true);

    if(this->decomp_ == true)
    {
        // Solve via decomposition
        this->ExtractRHSinX_(rhs, x);

        this->SolveL_();
        this->SolveD_();
        this->SolveR_();

        this->InsertSolution_(x);
    }
    else
    {
        // Solve directly
        this->Solve_(rhs, x);
    }

    log_debug(this, "MultiColored::Solve()", " #*# end");
}

// Permutes rhs into x and scatters it into the per-color x_block_ vectors.
template <class OperatorType, class VectorType, typename ValueType>
void MultiColored<OperatorType, VectorType, ValueType>::ExtractRHSinX_(const VectorType& rhs,
                                                                       VectorType* x)
{
    log_debug(this, "MultiColored::ExtractRHSinX_()", (const void*&)rhs, x);

    assert(this->build_ == true);

    x->CopyFromPermute(rhs, this->permutation_);

    int x_offset = 0;
    for(int i = 0; i < this->num_blocks_; ++i)
    {
        this->x_block_[i]->CopyFrom(*x, x_offset, 0, this->block_sizes_[i]);

        x_offset += this->block_sizes_[i];
    }
}

// Gathers the per-color solutions back into x (undoing the permutation).
template <class OperatorType, class VectorType, typename ValueType>
void MultiColored<OperatorType, VectorType, ValueType>::InsertSolution_(VectorType* x)
{
    log_debug(this, "MultiColored::InsertSolution_()", x);

    assert(this->build_ == true);

    int x_offset = 0;
    for(int i = 0; i < this->num_blocks_; ++i)
    {
        this->x_.CopyFrom(*this->x_block_[i], 0, x_offset, this->block_sizes_[i]);

        x_offset += this->block_sizes_[i];
    }

    x->CopyFromPermuteBackward(this->x_, this->permutation_);
}

// Moves all backend-resident data to the host.
template <class OperatorType, class VectorType, typename ValueType>
void MultiColored<OperatorType, VectorType, ValueType>::MoveToHostLocalData_(void)
{
    log_debug(this, "MultiColored::MoveToHostLocalData_()", this->build_);

    if(this->build_ == true)
    {
        this->preconditioner_->MoveToHost();

        if(this->decomp_ == true)
        {
            for(int i = 0; i < this->num_blocks_; ++i)
            {
                this->x_block_[i]->MoveToHost();
                this->diag_block_[i]->MoveToHost();
                this->diag_solver_[i]->MoveToHost();

                for(int j = 0; j < this->num_blocks_; ++j)
                {
                    this->preconditioner_block_[i][j]->MoveToHost();
                }
            }
        }

        if((this->analyzer_op_ != this->op_) && (this->analyzer_op_ != NULL))
        {
            this->analyzer_op_->MoveToHost();
        }
    }

    this->permutation_.MoveToHost();
    this->x_.MoveToHost();
}

// Moves all host-resident data to the accelerator backend.
template <class OperatorType, class VectorType, typename ValueType>
void MultiColored<OperatorType, VectorType, ValueType>::MoveToAcceleratorLocalData_(void)
{
    log_debug(this, "MultiColored::MoveToAcceleratorLocalData_()", this->build_);

    if(this->build_ == true)
    {
        this->preconditioner_->MoveToAccelerator();

        if(this->decomp_ == true)
        {
            for(int i = 0; i < this->num_blocks_; ++i)
            {
                this->x_block_[i]->MoveToAccelerator();
                this->diag_block_[i]->MoveToAccelerator();
                this->diag_solver_[i]->MoveToAccelerator();

                for(int j = 0; j < this->num_blocks_; ++j)
                {
                    this->preconditioner_block_[i][j]->MoveToAccelerator();
                }
            }
        }

        if((this->analyzer_op_ != this->op_) && (this->analyzer_op_ != NULL))
        {
            this->analyzer_op_->MoveToAccelerator();
        }
    }

    this->permutation_.MoveToAccelerator();
    this->x_.MoveToAccelerator();
}

template class MultiColored<LocalMatrix<double>, LocalVector<double>, double>;
template class MultiColored<LocalMatrix<float>, LocalVector<float>, float>;
#ifdef SUPPORT_COMPLEX
template class MultiColored<LocalMatrix<std::complex<double>>,
                            LocalVector<std::complex<double>>,
                            std::complex<double>>;
template class MultiColored<LocalMatrix<std::complex<float>>,
                            LocalVector<std::complex<float>>,
                            std::complex<float>>;
#endif

} // namespace rocalution
/* * Licensed to the OpenAirInterface (OAI) Software Alliance under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The OpenAirInterface Software Alliance licenses this file to You under * the OAI Public License, Version 1.1 (the "License"); you may not use this * file except in compliance with the License. You may obtain a copy of the * License at * * http://www.openairinterface.org/?page_id=698 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *------------------------------------------------------------------------------- * For more information about the OpenAirInterface (OAI) Software Alliance: * contact@openairinterface.org */ /*! \file \brief \author Keliang DU, BUPT \date 2020 \email: contact@openairinterface.org */ #ifndef _PERIODICREGISTRATIONUPDATETIMER_H_ #define _PERIODICREGISTRATIONUPDATETIMER_H_ extern "C" { #include "Ngap_PeriodicRegistrationUpdateTimer.h" } namespace ngap { class PeriodicRegistrationUpdateTimer { public: PeriodicRegistrationUpdateTimer(); virtual ~PeriodicRegistrationUpdateTimer(); void setPeriodicRegistrationUpdateTimer(uint8_t m_updatetimer); void getPeriodicRegistrationUpdateTimer(uint8_t& m_updatetimer); bool encode2PeriodicRegistrationUpdateTimer( Ngap_PeriodicRegistrationUpdateTimer_t* periodicRegistrationUpdateTimer); bool decodefromPeriodicRegistrationUpdateTimer( Ngap_PeriodicRegistrationUpdateTimer_t* periodicRegistrationUpdateTimer); private: uint8_t updatetimer; }; } // namespace ngap #endif
/**
* Adobe Experience Manager (AEM) API
* Swagger AEM is an OpenAPI specification for Adobe Experience Manager (AEM) API
*
* OpenAPI spec version: 3.2.0-pre.0
* Contact: opensource@shinesolutions.com
*
* NOTE: This class is auto generated by OpenAPI-Generator 3.2.1-SNAPSHOT.
* https://openapi-generator.tech
* Do not edit the class manually.
*/

#include "SamlConfigurationPropertyItemsBoolean.h"

#include <string>
#include <sstream>
#include <utility>

#include <boost/property_tree/ptree.hpp>
#include <boost/property_tree/json_parser.hpp>

using boost::property_tree::ptree;
using boost::property_tree::read_json;
using boost::property_tree::write_json;

namespace org {
namespace openapitools {
namespace server {
namespace model {

// Model for a boolean SAML configuration property: name/description metadata
// plus the boolean value itself.
SamlConfigurationPropertyItemsBoolean::SamlConfigurationPropertyItemsBoolean()
{
    m_Name = "";
    m_Optional = false;
    m_Is_set = false;
    m_Type = 0;
    m_Value = false;
    m_Description = "";
}

SamlConfigurationPropertyItemsBoolean::~SamlConfigurationPropertyItemsBoolean()
{
}

// Serializes all fields into a single-line JSON object string.
std::string SamlConfigurationPropertyItemsBoolean::toJsonString()
{
    std::stringstream ss;
    ptree pt;
    pt.put("Name", m_Name);
    pt.put("Optional", m_Optional);
    pt.put("Is_set", m_Is_set);
    pt.put("Type", m_Type);
    pt.put("Value", m_Value);
    pt.put("Description", m_Description);
    write_json(ss, pt, false);
    return ss.str();
}

// Populates the fields from a JSON object string; missing keys fall back
// to the same defaults the constructor uses.
// NOTE(review): read_json throws on malformed input — callers must handle it.
void SamlConfigurationPropertyItemsBoolean::fromJsonString(std::string const& jsonString)
{
    std::stringstream ss(jsonString);
    ptree pt;
    read_json(ss, pt);
    m_Name = pt.get("Name", "");
    m_Optional = pt.get("Optional", false);
    m_Is_set = pt.get("Is_set", false);
    m_Type = pt.get("Type", 0);
    m_Value = pt.get("Value", false);
    m_Description = pt.get("Description", "");
}

std::string SamlConfigurationPropertyItemsBoolean::getName() const
{
    return m_Name;
}

// Sink parameter: move the by-value string into place instead of copying.
void SamlConfigurationPropertyItemsBoolean::setName(std::string value)
{
    m_Name = std::move(value);
}

bool SamlConfigurationPropertyItemsBoolean::isOptional() const
{
    return m_Optional;
}

void SamlConfigurationPropertyItemsBoolean::setOptional(bool value)
{
    m_Optional = value;
}

bool SamlConfigurationPropertyItemsBoolean::isIsSet() const
{
    return m_Is_set;
}

void SamlConfigurationPropertyItemsBoolean::setIsSet(bool value)
{
    m_Is_set = value;
}

int32_t SamlConfigurationPropertyItemsBoolean::getType() const
{
    return m_Type;
}

void SamlConfigurationPropertyItemsBoolean::setType(int32_t value)
{
    m_Type = value;
}

bool SamlConfigurationPropertyItemsBoolean::isValue() const
{
    return m_Value;
}

void SamlConfigurationPropertyItemsBoolean::setValue(bool value)
{
    m_Value = value;
}

std::string SamlConfigurationPropertyItemsBoolean::getDescription() const
{
    return m_Description;
}

// Sink parameter: move the by-value string into place instead of copying.
void SamlConfigurationPropertyItemsBoolean::setDescription(std::string value)
{
    m_Description = std::move(value);
}

}
}
}
}
//===--- REPL.cpp - the integrated REPL -----------------------------------===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//

#include "swift/Immediate/Immediate.h"
#include "ImmediateImpl.h"

#include "swift/Config.h"
#include "swift/Subsystems.h"
#include "swift/AST/ASTContext.h"
#include "swift/AST/DiagnosticsFrontend.h"
#include "swift/AST/IRGenOptions.h"
#include "swift/AST/Module.h"
#include "swift/AST/NameLookup.h"
#include "swift/Basic/LLVMContext.h"
#include "swift/Frontend/Frontend.h"
#include "swift/IDE/REPLCodeCompletion.h"
#include "swift/IDE/Utils.h"
#include "swift/Parse/PersistentParserState.h"
#include "swift/SIL/SILModule.h"
#include "swift/SILOptimizer/PassManager/Passes.h"
#include "llvm/ExecutionEngine/MCJIT.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Constants.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/Process.h"

// The interactive line editor is only available where libedit's wide-char
// histedit API exists; everything below the helpers is conditional on it.
#if HAVE_UNICODE_LIBEDIT
#include <histedit.h>
#include <wchar.h>
#endif

using namespace swift;
using namespace swift::immediate;

namespace {

/// Classification of one unit of user input read by the REPL front end.
enum class REPLInputKind : int {
  /// The REPL got a "quit" signal.
  REPLQuit,
  /// Empty whitespace-only input.
  Empty,
  /// A REPL directive, such as ':help'.
  REPLDirective,
  /// Swift source code.
  SourceCode,
};

/// Adapter that maps the platform's wchar_t width onto the matching LLVM
/// UTF conversion routines. Specialized for 2-byte (UTF-16) and 4-byte
/// (UTF-32) wchar_t; the `Convert` alias below selects the right one at
/// compile time.
template<size_t N>
class ConvertForWcharSize;

template<>
class ConvertForWcharSize<2> {
public:
  static llvm::ConversionResult ConvertFromUTF8(const char** sourceStart,
                                                const char* sourceEnd,
                                                wchar_t** targetStart,
                                                wchar_t* targetEnd,
                                                llvm::ConversionFlags flags) {
    return ConvertUTF8toUTF16(reinterpret_cast<const llvm::UTF8**>(sourceStart),
                              reinterpret_cast<const llvm::UTF8*>(sourceEnd),
                              reinterpret_cast<llvm::UTF16**>(targetStart),
                              reinterpret_cast<llvm::UTF16*>(targetEnd),
                              flags);
  }

  static llvm::ConversionResult ConvertToUTF8(const wchar_t** sourceStart,
                                              const wchar_t* sourceEnd,
                                              char** targetStart,
                                              char* targetEnd,
                                              llvm::ConversionFlags flags) {
    return ConvertUTF16toUTF8(
        reinterpret_cast<const llvm::UTF16**>(sourceStart),
        reinterpret_cast<const llvm::UTF16*>(sourceEnd),
        reinterpret_cast<llvm::UTF8**>(targetStart),
        reinterpret_cast<llvm::UTF8*>(targetEnd), flags);
  }
};

template<>
class ConvertForWcharSize<4> {
public:
  static llvm::ConversionResult ConvertFromUTF8(const char** sourceStart,
                                                const char* sourceEnd,
                                                wchar_t** targetStart,
                                                wchar_t* targetEnd,
                                                llvm::ConversionFlags flags) {
    return ConvertUTF8toUTF32(reinterpret_cast<const llvm::UTF8**>(sourceStart),
                              reinterpret_cast<const llvm::UTF8*>(sourceEnd),
                              reinterpret_cast<llvm::UTF32**>(targetStart),
                              reinterpret_cast<llvm::UTF32*>(targetEnd),
                              flags);
  }

  static llvm::ConversionResult ConvertToUTF8(const wchar_t** sourceStart,
                                              const wchar_t* sourceEnd,
                                              char** targetStart,
                                              char* targetEnd,
                                              llvm::ConversionFlags flags) {
    return ConvertUTF32toUTF8(
        reinterpret_cast<const llvm::UTF32**>(sourceStart),
        reinterpret_cast<const llvm::UTF32*>(sourceEnd),
        reinterpret_cast<llvm::UTF8**>(targetStart),
        reinterpret_cast<llvm::UTF8*>(targetEnd), flags);
  }
};

using Convert = ConvertForWcharSize<sizeof(wchar_t)>;

#if HAVE_UNICODE_LIBEDIT
/// Append the UTF-8 string `utf8` to `out` as wide characters.
/// Uses lenient conversion; a failed conversion only trips the assert in
/// debug builds. `out.set_size` trims to the number of code units actually
/// produced (the reserve is an upper bound).
static void convertFromUTF8(llvm::StringRef utf8,
                            llvm::SmallVectorImpl<wchar_t> &out) {
  size_t reserve = out.size() + utf8.size();
  out.reserve(reserve);
  const char *utf8_begin = utf8.begin();
  wchar_t *wide_begin = out.end();
  auto res = Convert::ConvertFromUTF8(&utf8_begin, utf8.end(),
                                      &wide_begin, out.data() + reserve,
                                      llvm::lenientConversion);
  assert(res == llvm::conversionOK && "utf8-to-wide conversion failed!");
  (void)res;
  out.set_size(wide_begin - out.begin());
}

/// Append the wide-character buffer `wide` to `out` as UTF-8 bytes.
/// Reserves 4 bytes per wide char — the worst-case UTF-8 expansion.
static void convertToUTF8(llvm::ArrayRef<wchar_t> wide,
                          llvm::SmallVectorImpl<char> &out) {
  size_t reserve = out.size() + wide.size()*4;
  out.reserve(reserve);
  const wchar_t *wide_begin = wide.begin();
  char *utf8_begin = out.end();
  auto res = Convert::ConvertToUTF8(&wide_begin, wide.end(),
                                    &utf8_begin, out.data() + reserve,
                                    llvm::lenientConversion);
  assert(res == llvm::conversionOK && "wide-to-utf8 conversion failed!");
  (void)res;
  out.set_size(utf8_begin - out.begin());
}
#endif

} // end anonymous namespace

#if HAVE_UNICODE_LIBEDIT

/// Parse and type-check one unit of REPL input.
///
/// Creates a fresh module named \p Name containing a single REPL source file
/// built from \p Buffer, makes it implicitly import \p MostRecentModule (so
/// earlier REPL lines remain visible) plus re-exports of that module's own
/// private imports, then parses until the lexer reports completion and runs
/// the type checker. Returns the new module; errors are reported through the
/// ASTContext's diagnostics, not the return value.
static ModuleDecl *
typeCheckREPLInput(ModuleDecl *MostRecentModule, StringRef Name,
                   PersistentParserState &PersistentState,
                   std::unique_ptr<llvm::MemoryBuffer> Buffer) {
  using ImplicitModuleImportKind = SourceFile::ImplicitModuleImportKind;
  assert(MostRecentModule);
  ASTContext &Ctx = MostRecentModule->getASTContext();
  auto REPLModule = ModuleDecl::create(Ctx.getIdentifier(Name), Ctx);
  auto BufferID = Ctx.SourceMgr.addNewSourceBuffer(std::move(Buffer));
  auto ImportKind = ImplicitModuleImportKind::None;
  auto &REPLInputFile = *new (Ctx) SourceFile(*REPLModule,
                                              SourceFileKind::REPL, BufferID,
                                              ImportKind);
  REPLModule->addFile(REPLInputFile);

  // Chain to the previous REPL module so earlier definitions stay in scope.
  ModuleDecl::ImportedModule ImportOfMostRecentModule{
      /*AccessPath*/{}, MostRecentModule};
  REPLInputFile.addImports(SourceFile::ImportedModuleDesc(
      ImportOfMostRecentModule, SourceFile::ImportOptions()));

  // Carry forward the previous module's own imports as exported imports, so
  // anything the user imported earlier is still visible on this line.
  SmallVector<ModuleDecl::ImportedModule, 8> Imports;
  MostRecentModule->getImportedModules(Imports,
                                       ModuleDecl::ImportFilterKind::Private);
  if (!Imports.empty()) {
    SmallVector<SourceFile::ImportedModuleDesc, 8> ImportsWithOptions;
    for (auto Import : Imports) {
      ImportsWithOptions.emplace_back(SourceFile::ImportedModuleDesc(
          Import, SourceFile::ImportFlags::Exported));
    }
    REPLInputFile.addImports(ImportsWithOptions);
  }

  bool FoundAnySideEffects = false;
  bool Done;
  do {
    FoundAnySideEffects |=
        parseIntoSourceFile(REPLInputFile, BufferID, &Done, nullptr,
                            &PersistentState);
  } while (!Done);
  performTypeChecking(REPLInputFile, PersistentState.getTopLevelContext(),
                      /*Options*/None);
  return REPLModule;
}

/// An arbitrary, otherwise-unused char value that editline interprets as
/// entering/leaving "literal mode", meaning it passes prompt characters
/// through to the terminal without affecting the line state. This prevents
/// color escape sequences from interfering with editline's internal state.
static constexpr wchar_t LITERAL_MODE_CHAR = L'\1';

/// Append a terminal escape sequence in "literal mode" so that editline
/// ignores it.
static void appendEscapeSequence(SmallVectorImpl<wchar_t> &dest,
                                 llvm::StringRef src) {
  dest.push_back(LITERAL_MODE_CHAR);
  convertFromUTF8(src, dest);
  dest.push_back(LITERAL_MODE_CHAR);
}

/// The main REPL prompt string.
static const wchar_t * const PS1 = L"(swift) ";
/// The REPL prompt string for line continuations.
static const wchar_t * const PS2 = L"        ";

class REPLInput;
class REPLEnvironment;

namespace {
/// Observe that we are processing REPL input. Dump source and reset any
/// colorization before dying.
class PrettyStackTraceREPL : public llvm::PrettyStackTraceEntry {
  REPLInput &Input;
public:
  PrettyStackTraceREPL(REPLInput &Input) : Input(Input) {}

  void print(llvm::raw_ostream &out) const override;
};
} // end anonymous namespace

/// EditLine wrapper that implements the user interface behavior for reading
/// user input to the REPL. All of its methods must be usable from a separate
/// thread and so shouldn't touch anything outside of the EditLine, History,
/// and member object state.
///
/// FIXME: Need the module for completions!
/// Currently REPLRunLoop uses
/// synchronous messaging between the REPLInput thread and the main thread,
/// and client code shouldn't have access to the AST, so only one thread will
/// be accessing the module at a time. However, if REPLRunLoop
/// (or a new REPL application) ever requires asynchronous messaging between
/// REPLInput and REPLEnvironment, or if client code expected to be able to
/// grovel into the REPL's AST, then locking will be necessary to serialize
/// access to the AST.
class REPLInput {
  PrettyStackTraceREPL StackTrace;

  EditLine *e;                       // libedit line-editor handle
  HistoryW *h;                       // libedit wide-char history handle
  size_t PromptContinuationLevel;    // indent depth shown in the PS2 prompt
  bool NeedPromptContinuation;       // next prompt is PS2, not PS1
  bool ShowColors;
  bool PromptedForLine;              // a prompt was actually displayed
  bool Outdented;                    // already outdented once on this line

  REPLCompletions completions;       // cached completion set for tab-cycling

  llvm::SmallVector<wchar_t, 80> PromptString;

  /// A buffer for all lines that the user entered, but we have not parsed yet.
  llvm::SmallString<128> CurrentLines;

  llvm::SmallString<16> CodeCompletionErasedBytes;

public:
  REPLEnvironment &Env;
  bool Autoindent;

  REPLInput(REPLEnvironment &env)
    : StackTrace(*this), Env(env), Autoindent(true)
  {
    // Only show colors if both stderr and stdout have colors.
    ShowColors = llvm::errs().has_colors() && llvm::outs().has_colors();

    // Make sure the terminal color gets restored when the REPL is quit.
    if (ShowColors)
      atexit([] {
        llvm::outs().resetColor();
        llvm::errs().resetColor();
      });

    e = el_init("swift", stdin, stdout, stderr);
    h = history_winit();
    PromptContinuationLevel = 0;
    el_wset(e, EL_EDITOR, L"emacs");
    // EL_PROMPT_ESC: PromptFn's output is scanned for LITERAL_MODE_CHAR so
    // color escapes pass through without confusing editline's cursor math.
    el_wset(e, EL_PROMPT_ESC, PromptFn, LITERAL_MODE_CHAR);
    el_wset(e, EL_CLIENTDATA, (void*)this);
    el_wset(e, EL_HIST, history, h);
    el_wset(e, EL_SIGNAL, 1);
    el_wset(e, EL_GETCFN, GetCharFn);

    // Provide special outdenting behavior for '}' and ':'.
    el_wset(e, EL_ADDFN, L"swift-close-brace", L"Reduce {} indentation level",
            BindingFn<&REPLInput::onCloseBrace>);
    el_wset(e, EL_BIND, L"}", L"swift-close-brace", nullptr);

    el_wset(e, EL_ADDFN, L"swift-colon", L"Reduce label indentation level",
            BindingFn<&REPLInput::onColon>);
    el_wset(e, EL_BIND, L":", L"swift-colon", nullptr);

    // Provide special indent/completion behavior for tab.
    el_wset(e, EL_ADDFN, L"swift-indent-or-complete",
            L"Indent line or trigger completion",
            BindingFn<&REPLInput::onIndentOrComplete>);
    el_wset(e, EL_BIND, L"\t", L"swift-indent-or-complete", nullptr);

    el_wset(e, EL_ADDFN, L"swift-complete", L"Trigger completion",
            BindingFn<&REPLInput::onComplete>);

    // Provide some common bindings to complement editline's defaults.
    // ^W should delete previous word, not the entire line.
    el_wset(e, EL_BIND, L"\x17", L"ed-delete-prev-word", nullptr);
    // ^_ should undo.
    el_wset(e, EL_BIND, L"\x1f", L"vi-undo", nullptr);

    HistEventW ev;
    history_w(h, &ev, H_SETSIZE, 800);
  }

  ~REPLInput() {
    if (ShowColors)
      llvm::outs().resetColor();

    // FIXME: This should not be needed, but seems to help when stdout is being
    // redirected to a file.  Perhaps there is some underlying editline bug
    // where it is setting stdout into some weird state and not restoring it
    // with el_end?
    llvm::outs().flush();
    fflush(stdout);
    el_end(e);
  }

  /// Read one complete unit of input (possibly spanning several physical
  /// lines), classify it, and return the UTF-8 text in \p Result.
  /// Loops until ide::isSourceInputComplete says the accumulated source is a
  /// complete statement; a leading ':' on the first line short-circuits to a
  /// REPL directive.
  REPLInputKind getREPLInput(SmallVectorImpl<char> &Result) {
    ide::SourceCompleteResult SCR;
    SCR.IsComplete = true;
    unsigned CurChunkLines = 0;

    // NOTE(review): fixed 4K wide-char buffer for the history entry; input
    // longer than this would overflow via wcsncat — presumably acceptable
    // for interactive use, but worth confirming.
    wchar_t TotalLine[4096] = L"";

    CurrentLines.clear();

    // Reset color before showing the prompt.
    if (ShowColors)
      llvm::outs().resetColor();

    do {
      // Read one line.
      PromptContinuationLevel = SCR.IndentLevel;
      NeedPromptContinuation = !SCR.IsComplete;
      PromptedForLine = false;
      Outdented = false;
      int LineCount;
      size_t LineStart = CurrentLines.size();
      const wchar_t* WLine = el_wgets(e, &LineCount);
      if (!WLine) {
        // End-of-file.
        if (PromptedForLine)
          llvm::outs() << "\n";
        return REPLInputKind::REPLQuit;
      }

      if (Autoindent) {
        size_t indent = PromptContinuationLevel*2;
        CurrentLines.append(indent, ' ');
      }

      size_t WLineLength = wcslen(WLine);
      convertToUTF8(llvm::makeArrayRef(WLine, WLine + WLineLength),
                    CurrentLines);
      wcsncat(TotalLine, WLine, WLineLength);

      ++CurChunkLines;

      // If we detect a line starting with a colon, treat it as a special
      // REPL escape.
      char const *s = CurrentLines.data() + LineStart;
      char const *p = s;
      while (p < CurrentLines.end() && isspace(*p)) {
        ++p;
      }
      if (p == CurrentLines.end()) {
        // Blank line: keep reading if we're mid-statement, else it's Empty.
        if (!SCR.IsComplete) continue;
        return REPLInputKind::Empty;
      }

      if (CurChunkLines == 1 && SCR.IndentLevel == 0 && *p == ':') {
        // Colorize the response output.
        if (ShowColors)
          llvm::outs().changeColor(llvm::raw_ostream::GREEN);

        Result.clear();
        Result.append(CurrentLines.begin(), CurrentLines.end());

        // The lexer likes null-terminated data.
        Result.push_back('\0');
        Result.pop_back();

        // Enter the line into the line history.
        HistEventW ev;
        history_w(h, &ev, H_ENTER, TotalLine);
        return REPLInputKind::REPLDirective;
      }

      SCR = ide::isSourceInputComplete(CurrentLines.str(),
                                       SourceFileKind::REPL);
      // Keep reading if input is unfinished.
    } while (!SCR.IsComplete);

    // Enter the line into the line history.
    HistEventW ev;
    history_w(h, &ev, H_ENTER, TotalLine);

    Result.clear();
    Result.append(CurrentLines.begin(), CurrentLines.end());

    // The lexer likes null-terminated data.
    Result.push_back('\0');
    Result.pop_back();

    // Colorize the response output.
    if (ShowColors)
      llvm::outs().changeColor(llvm::raw_ostream::CYAN);

    return REPLInputKind::SourceCode;
  }

private:
  /// Prompt callback registered with editline; recovers `this` from the
  /// EL_CLIENTDATA slot.
  static wchar_t *PromptFn(EditLine *e) {
    void* clientdata;
    el_wget(e, EL_CLIENTDATA, &clientdata);
    return const_cast<wchar_t*>(((REPLInput*)clientdata)->getPrompt());
  }

  /// Build the prompt string (PS1 or PS2 + autoindent padding), wrapping any
  /// color escapes in literal-mode markers so editline ignores them.
  const wchar_t *getPrompt() {
    PromptString.clear();

    if (ShowColors) {
      const char *colorCode =
        llvm::sys::Process::OutputColor(llvm::raw_ostream::YELLOW,
                                        false, false);
      if (colorCode)
        appendEscapeSequence(PromptString, colorCode);
    }

    if (!NeedPromptContinuation)
      PromptString.insert(PromptString.end(), PS1, PS1 + wcslen(PS1));
    else {
      PromptString.insert(PromptString.end(), PS2, PS2 + wcslen(PS2));
      if (Autoindent)
        PromptString.append(2*PromptContinuationLevel, L' ');
    }

    if (ShowColors) {
      const char *colorCode = llvm::sys::Process::ResetColor();
      if (colorCode)
        appendEscapeSequence(PromptString, colorCode);
    }

    PromptedForLine = true;
    PromptString.push_back(L'\0');
    return PromptString.data();
  }

  /// Custom GETCFN to reset completion state after typing.
  static int GetCharFn(EditLine *e, wchar_t *out) {
    void* clientdata;
    el_wget(e, EL_CLIENTDATA, &clientdata);
    REPLInput *that = (REPLInput*)clientdata;

    wint_t c;
    // Retry reads interrupted by signals (EINTR); any other WEOF is a real
    // EOF (return 0) or error (return -1).
    while (errno = 0, (c = getwc(stdin)) == WEOF) {
      if (errno == EINTR)
        continue;
      *out = L'\0';
      return feof(stdin) ? 0 : -1;
    }

    // If the user typed anything other than tab, reset the completion state.
    if (c != L'\t') {
      that->completions.reset();
      that->CodeCompletionErasedBytes.clear();
    }
    *out = wchar_t(c);
    return 1;
  }

  /// Trampoline that turns an editline key-binding callback into a member
  /// function call on the REPLInput stored in EL_CLIENTDATA.
  template<unsigned char (REPLInput::*method)(int)>
  static unsigned char BindingFn(EditLine *e, int ch) {
    void *clientdata;
    el_wget(e, EL_CLIENTDATA, &clientdata);
    return (((REPLInput*)clientdata)->*method)(ch);
  }

  /// True if everything before the cursor on the current line is whitespace.
  bool isAtStartOfLine(const LineInfoW *line) {
    for (wchar_t c : llvm::makeArrayRef(line->buffer,
                                        line->cursor - line->buffer)) {
      if (!iswspace(c))
        return false;
    }
    return true;
  }

  // /^\s*\w+\s*:$/
  bool lineLooksLikeLabel(const LineInfoW *line) {
    const wchar_t *p = line->buffer;
    while (p != line->cursor && iswspace(*p))
      ++p;

    if (p == line->cursor)
      return false;

    do {
      ++p;
    } while (p != line->cursor && (iswalnum(*p) || *p == L'_'));

    while (p != line->cursor && iswspace(*p))
      ++p;

    return p+1 == line->cursor || *p == L':';
  }

  // /^\s*set\s*\(.*\)\s*:$/
  bool lineLooksLikeSetter(const LineInfoW *line) {
    const wchar_t *p = line->buffer;
    while (p != line->cursor && iswspace(*p))
      ++p;

    if (p == line->cursor || *p++ != L's')
      return false;
    if (p == line->cursor || *p++ != L'e')
      return false;
    if (p == line->cursor || *p++ != L't')
      return false;

    while (p != line->cursor && iswspace(*p))
      ++p;

    if (p == line->cursor || *p++ != L'(')
      return false;

    if (line->cursor - p < 2 || line->cursor[-1] != L':')
      return false;

    p = line->cursor - 1;
    while (iswspace(*--p));

    return *p == L')';
  }

  // /^\s*case.*:$/
  bool lineLooksLikeCase(const LineInfoW *line) {
    const wchar_t *p = line->buffer;
    while (p != line->cursor && iswspace(*p))
      ++p;

    if (p == line->cursor || *p++ != L'c')
      return false;
    if (p == line->cursor || *p++ != L'a')
      return false;
    if (p == line->cursor || *p++ != L's')
      return false;
    if (p == line->cursor || *p++ != L'e')
      return false;

    return line->cursor[-1] == ':';
  }

  /// Drop one indentation level, but at most once per line.
  void outdent() {
    // If we didn't already outdent, do so.
    if (!Outdented) {
      if (PromptContinuationLevel > 0)
        --PromptContinuationLevel;
      Outdented = true;
    }
  }

  /// ':' key binding: insert the colon, then outdent if the line now looks
  /// like a label, setter clause, or 'case' label.
  unsigned char onColon(int ch) {
    // Add the character to the string.
    wchar_t s[2] = {(wchar_t)ch, 0};
    el_winsertstr(e, s);

    const LineInfoW *line = el_wline(e);

    // Outdent if the line looks like a label.
    if (lineLooksLikeLabel(line))
      outdent();
    // Outdent if the line looks like a setter.
    else if (lineLooksLikeSetter(line))
      outdent();
    // Outdent if the line looks like a 'case' label.
    else if (lineLooksLikeCase(line))
      outdent();

    return CC_REFRESH;
  }

  /// '}' key binding: insert the brace, outdenting when it begins the line.
  unsigned char onCloseBrace(int ch) {
    bool atStart = isAtStartOfLine(el_wline(e));

    // Add the character to the string.
    wchar_t s[2] = {(wchar_t)ch, 0};
    el_winsertstr(e, s);

    // Don't outdent if we weren't at the start of the line.
    if (!atStart) {
      return CC_REFRESH;
    }

    outdent();
    return CC_REFRESH;
  }

  /// Tab key binding: indent to the next 2-column tab stop when the cursor is
  /// in leading whitespace, otherwise trigger completion.
  unsigned char onIndentOrComplete(int ch) {
    const LineInfoW *line = el_wline(e);

    // FIXME: UTF-8? What's that?
    size_t cursorPos = line->cursor - line->buffer;

    // If there's nothing but whitespace before the cursor, indent to the next
    // 2-character tab stop.
    if (isAtStartOfLine(line)) {
      const wchar_t *indent = cursorPos & 1 ? L" " : L"  ";
      el_winsertstr(e, indent);
      return CC_REFRESH;
    }

    // Otherwise, look for completions.
    return onComplete(ch);
  }

  /// Insert a UTF-8 string into the edit buffer (editline wants a
  /// null-terminated wide string).
  void insertStringRef(StringRef s) {
    if (s.empty())
      return;
    // Convert s to wchar_t* and null-terminate for el_winsertstr.
    SmallVector<wchar_t, 64> TmpStr;
    convertFromUTF8(s, TmpStr);
    TmpStr.push_back(L'\0');
    el_winsertstr(e, TmpStr.data());
  }

  /// Print the completion candidates, trimmed to the terminal's size.
  void displayCompletions(llvm::ArrayRef<llvm::StringRef> list) {
    // FIXME: Do the print-completions-below-the-prompt thing bash does.
    llvm::outs() << '\n';

    // Trim the completion list to the terminal size.
    int lines_int = 0, columns_int = 0;
    // NB: EL_GETTC doesn't work with el_wget (?!)
    el_get(e, EL_GETTC, "li", &lines_int);
    el_get(e, EL_GETTC, "co", &columns_int);
    assert(lines_int > 0 && columns_int > 0 && "negative or zero screen size?!");

    auto lines = size_t(lines_int), columns = size_t(columns_int);
    size_t trimToColumns = columns > 2 ? columns - 2 : 0;

    size_t trimmed = 0;
    if (list.size() > lines - 1) {
      size_t trimToLines = lines > 2 ? lines - 2 : 0;
      trimmed = list.size() - trimToLines;
      list = list.slice(0, trimToLines);
    }

    for (StringRef completion : list) {
      if (completion.size() > trimToColumns)
        completion = completion.slice(0, trimToColumns);
      llvm::outs() << "  " << completion << '\n';
    }
    if (trimmed > 0)
      llvm::outs() << "  (and " << trimmed << " more)\n";
  }

  // Implemented out-of-line after REPLEnvironment is defined.
  SourceFile &getFileForCodeCompletion();

  /// Completion state machine: first tab inserts the common root; second tab
  /// lists candidates; further tabs cycle through the individual stems.
  unsigned char onComplete(int ch) {
    const LineInfoW *line = el_wline(e);
    llvm::ArrayRef<wchar_t> wprefix(line->buffer,
                                    line->cursor - line->buffer);
    llvm::SmallString<64> Prefix;
    Prefix.assign(CurrentLines);
    convertToUTF8(wprefix, Prefix);

    if (!completions) {
      // If we aren't currently working with a completion set, generate one.
      completions.populate(getFileForCodeCompletion(), Prefix);
      // Display the common root of the found completions and beep unless we
      // found a unique one.
      insertStringRef(completions.getRoot());
      return completions.isUnique() ? CC_REFRESH : CC_REFRESH_BEEP;
    }

    // Otherwise, advance through the completion state machine.
    switch (completions.getState()) {
    case CompletionState::CompletedRoot:
      // We completed the root. Next step is to display the completion list.
      displayCompletions(completions.getCompletionList());
      completions.setState(CompletionState::DisplayedCompletionList);
      return CC_REDISPLAY;

    case CompletionState::DisplayedCompletionList: {
      // Complete the next completion stem in the cycle.
      const auto Last = completions.getPreviousStem();
      el_wdeletestr(e, Last.InsertableString.size());
      Prefix.resize(Prefix.size() - Last.InsertableString.size());
      insertStringRef(CodeCompletionErasedBytes);
      Prefix.append(CodeCompletionErasedBytes);

      const auto Next = completions.getNextStem();
      CodeCompletionErasedBytes.clear();
      if (Next.NumBytesToErase != 0) {
        CodeCompletionErasedBytes.assign(Prefix.end() - Next.NumBytesToErase,
                                         Prefix.end());
        el_wdeletestr(e, Next.NumBytesToErase);
      }
      insertStringRef(Next.InsertableString);
      return CC_REFRESH;
    }

    case CompletionState::Empty:
    case CompletionState::Unique:
      // We already provided a definitive completion--nothing else to do.
      return CC_REFRESH_BEEP;

    case CompletionState::Invalid:
      llvm_unreachable("got an invalid completion set?!");
    }
  }
};

enum class PrintOrDump { Print, Dump };

/// Either pretty-print a declaration or dump its AST, per \p which.
static void printOrDumpDecl(Decl *d, PrintOrDump which) {
  if (which == PrintOrDump::Print) {
    d->print(llvm::outs());
    llvm::outs() << '\n';
  } else
    d->dump(llvm::outs());
}

/// The compiler and execution environment for the REPL.
class REPLEnvironment {
  CompilerInstance &CI;
  ModuleDecl *MostRecentModule;          // module for the previous REPL line
  ProcessCmdLine CmdLine;
  llvm::SmallPtrSet<swift::ModuleDecl *, 8> ImportedModules;
  SmallVector<llvm::Function*, 8> InitFns;
  bool RanGlobalInitializers;
  llvm::LLVMContext &LLVMContext;
  llvm::Module *Module;                  // cumulative JIT module (owned by EE)
  llvm::StringSet<> FuncsAlreadyGenerated;
  llvm::StringSet<> GlobalsAlreadyEmitted;
  llvm::Module DumpModule;               // accumulated IR for :dump_ir
  llvm::SmallString<128> DumpSource;     // accumulated source for :dump_source
  llvm::ExecutionEngine *EE;
  IRGenOptions IRGenOpts;
  const SILOptions SILOpts;

  REPLInput Input;
  PersistentParserState PersistentState;
  unsigned NextLineNumber = 0;

private:
  /// Rewrite \p M (a clone of the cumulative module) so that symbols already
  /// handed to the JIT in earlier lines become external declarations instead
  /// of fresh definitions, avoiding duplicate-symbol clashes when the clone
  /// is added to the ExecutionEngine.
  void stripPreviouslyGenerated(llvm::Module &M) {
    for (auto &function : M.getFunctionList()) {
      function.setVisibility(llvm::GlobalValue::DefaultVisibility);
      if (FuncsAlreadyGenerated.count(function.getName()))
        function.deleteBody();
      else {
        // main is regenerated per line; never mark it as already emitted.
        if (function.getName() != SWIFT_ENTRY_POINT_FUNCTION)
          FuncsAlreadyGenerated.insert(function.getName());
      }
    }

    for (auto &global : M.globals()) {
      if (!global.hasName())
        continue;
      if (global.hasGlobalUnnamedAddr())
        continue;

      global.setVisibility(llvm::GlobalValue::DefaultVisibility);
      if (!global.hasAvailableExternallyLinkage() &&
          !global.hasAppendingLinkage() &&
          !global.hasCommonLinkage()) {
        if (GlobalsAlreadyEmitted.count(global.getName())) {
          // Some targets don't support relative references to undefined
          // symbols. Keep the local copy of an ODR symbol if it's used in
          // a relative reference expression.
          bool usedInRelativeReference = false;
          if (global.hasLinkOnceODRLinkage()) {
            for (auto user : global.users()) {
              // A relative reference will look like sub (ptrtoint @Global, _)
              auto expr = dyn_cast<llvm::ConstantExpr>(user);
              if (!expr)
                continue;

              if (expr->getOpcode() != llvm::Instruction::PtrToInt)
                continue;

              for (auto exprUser : expr->users()) {
                auto exprExpr = dyn_cast<llvm::ConstantExpr>(exprUser);
                if (!exprExpr)
                  continue;

                if (exprExpr->getOpcode() != llvm::Instruction::Sub)
                  continue;

                if (exprExpr->getOperand(0) != expr)
                  continue;

                usedInRelativeReference = true;
                goto done;
              }
            }
          }
        done:
          if (!usedInRelativeReference)
            global.setInitializer(nullptr);
        } else
          GlobalsAlreadyEmitted.insert(global.getName());

        global.setLinkage(llvm::GlobalValue::ExternalLinkage);
      }
    }

    for (auto alias = M.alias_begin(); alias != M.alias_end();) {
      alias->setVisibility(llvm::GlobalValue::DefaultVisibility);
      if (!alias->hasAvailableExternallyLinkage() &&
          !alias->hasAppendingLinkage() &&
          !alias->hasCommonLinkage()) {
        alias->setLinkage(llvm::GlobalValue::ExternalLinkage);
        if (GlobalsAlreadyEmitted.count(alias->getName())) {
          // Replace already-emitted aliases with external declarations.
          SmallString<32> name = alias->getName();
          alias->setName("");
          auto external = new llvm::GlobalVariable(
              M, alias->getType()->getPointerElementType(),
              /*isConstant*/ false, alias->getLinkage(),
              /*initializer*/ nullptr, name);
          alias->replaceAllUsesWith(external);
          // Advance the iterator before erasing the current alias.
          auto &aliasToRemove = *alias;
          ++alias;
          aliasToRemove.eraseFromParent();
        } else {
          GlobalsAlreadyEmitted.insert(alias->getName());
          ++alias;
        }
      }
    }
  }

  /// Compile and run one line of Swift source: type-check, SILGen, IRGen,
  /// link into the cumulative JIT module, and execute its entry point.
  /// Returns false only on fatal errors; ordinary diagnostics leave the REPL
  /// running.
  bool executeSwiftSource(llvm::StringRef Line, const ProcessCmdLine &CmdLine) {
    // Parse the current line(s).
    auto InputBuf = llvm::MemoryBuffer::getMemBufferCopy(Line, "<REPL Input>");
    SmallString<8> Name{"REPL_"};
    llvm::raw_svector_ostream(Name) << NextLineNumber;
    ++NextLineNumber;
    ModuleDecl *M = typeCheckREPLInput(MostRecentModule, Name,
                                       PersistentState, std::move(InputBuf));

    // SILGen the module and produce SIL diagnostics.
    std::unique_ptr<SILModule> sil;

    if (!CI.getASTContext().hadError()) {
      // We don't want anything to get stripped, so pretend we're doing a
      // non-whole-module generation.
      sil = performSILGeneration(*M->getFiles().front(), CI.getSILOptions());
      runSILDiagnosticPasses(*sil);
      runSILOwnershipEliminatorPass(*sil);
      runSILLoweringPasses(*sil);
    }

    if (CI.getASTContext().hadError()) {
      if (CI.getDiags().hasFatalErrorOccurred())
        return false;

      CI.getASTContext().Diags.resetHadAnyError();

      // FIXME: Handling of "import" declarations? Is there any other
      // state which needs to be reset?

      return true;
    }

    MostRecentModule = M;

    DumpSource += Line;

    // IRGen the current line(s).
    // FIXME: We shouldn't need to use the global context here, but
    // something is persisting across calls to performIRGeneration.
    auto LineModule = performIRGeneration(
        IRGenOpts, M, std::move(sil), "REPLLine", PrimarySpecificPaths(),
        getGlobalLLVMContext(), /*parallelOutputFilenames*/{});

    if (CI.getASTContext().hadError())
      return false;

    // LineModule will get destroy by the following link process.
    // Make a copy of it to be able to correct produce DumpModule.
    std::unique_ptr<llvm::Module> SaveLineModule(CloneModule(*LineModule));

    if (!linkLLVMModules(Module, std::move(LineModule))) {
      return false;
    }

    std::unique_ptr<llvm::Module> NewModule(CloneModule(*Module));

    Module->getFunction("main")->eraseFromParent();

    stripPreviouslyGenerated(*NewModule);

    if (!linkLLVMModules(&DumpModule, std::move(SaveLineModule))) {
      return false;
    }
    llvm::Function *DumpModuleMain = DumpModule.getFunction("main");
    DumpModuleMain->setName("repl.line");

    if (autolinkImportedModules(M, IRGenOpts))
      return false;

    llvm::Module *TempModule = NewModule.get();
    EE->addModule(std::move(NewModule));

    EE->finalizeObject();

    for (auto InitFn : InitFns)
      EE->runFunctionAsMain(InitFn, CmdLine, nullptr);
    InitFns.clear();

    // FIXME: The way we do this is really ugly... we should be able to
    // improve this.
    if (!RanGlobalInitializers) {
      EE->runStaticConstructorsDestructors(*TempModule, false);
      RanGlobalInitializers = true;
    }
    llvm::Function *EntryFn = TempModule->getFunction("main");
    EE->runFunctionAsMain(EntryFn, CmdLine, nullptr);
    return true;
  }

public:
  /// Set up the JIT, load the runtime and linked libraries (unless
  /// \p ParseStdlib), and warm up the standard library. Construction errors
  /// are reported through the CompilerInstance's diagnostics; check
  /// CI.getASTContext().hadError() after constructing.
  REPLEnvironment(CompilerInstance &CI,
                  const ProcessCmdLine &CmdLine,
                  llvm::LLVMContext &LLVMCtx,
                  bool ParseStdlib)
    : CI(CI),
      MostRecentModule(CI.getMainModule()),
      CmdLine(CmdLine),
      RanGlobalInitializers(false),
      LLVMContext(LLVMCtx),
      Module(new llvm::Module("REPL", LLVMContext)),
      DumpModule("REPL", LLVMContext),
      IRGenOpts(),
      SILOpts(),
      Input(*this),
      PersistentState(CI.getASTContext())
  {
    ASTContext &Ctx = CI.getASTContext();
    Ctx.LangOpts.EnableAccessControl = false;
    if (!ParseStdlib) {
      if (!loadSwiftRuntime(Ctx.SearchPathOpts.RuntimeLibraryPath)) {
        CI.getDiags().diagnose(SourceLoc(),
                               diag::error_immediate_mode_missing_stdlib);
        return;
      }
      tryLoadLibraries(CI.getLinkLibraries(), Ctx.SearchPathOpts,
                       CI.getDiags());
    }

    // Hand ownership of Module to the EngineBuilder/ExecutionEngine.
    llvm::EngineBuilder builder{std::unique_ptr<llvm::Module>{Module}};
    std::string ErrorMsg;
    llvm::TargetOptions TargetOpt;
    std::string CPU;
    std::string Triple;
    std::vector<std::string> Features;
    std::tie(TargetOpt, CPU, Features, Triple)
      = getIRTargetOptions(IRGenOpts, CI.getASTContext());
    builder.setRelocationModel(llvm::Reloc::PIC_);
    builder.setTargetOptions(TargetOpt);
    builder.setMCPU(CPU);
    builder.setMAttrs(Features);
    builder.setErrorStr(&ErrorMsg);
    builder.setEngineKind(llvm::EngineKind::JIT);
    EE = builder.create();

    IRGenOpts.OptMode = OptimizationMode::NoOptimization;
    IRGenOpts.OutputKind = IRGenOutputKind::Module;
    IRGenOpts.UseJIT = true;
    IRGenOpts.IntegratedREPL = true;
    IRGenOpts.DebugInfoLevel = IRGenDebugInfoLevel::None;
    IRGenOpts.DebugInfoFormat = IRGenDebugInfoFormat::None;

    // The very first module is a dummy.
    CI.getMainModule()->getMainSourceFile(SourceFileKind::REPL).ASTStage =
        SourceFile::TypeChecked;

    if (!ParseStdlib) {
      // Force standard library to be loaded immediately. This forces any
      // errors to appear upfront, and helps eliminate some nasty lag after the
      // first statement is typed into the REPL.
      static const char WarmUpStmt[] = "Void()\n";

      auto Buffer =
          llvm::MemoryBuffer::getMemBufferCopy(WarmUpStmt,
                                               "<REPL Initialization>");
      (void)typeCheckREPLInput(MostRecentModule, "__Warmup", PersistentState,
                               std::move(Buffer));

      if (Ctx.hadError())
        return;
    }

    if (llvm::sys::Process::StandardInIsUserInput())
      llvm::outs() << "*** You are running Swift's integrated REPL, ***\n"
                      "*** intended for compiler and stdlib ***\n"
                      "*** development and testing purposes only. ***\n"
                      "*** The full REPL is built as part of LLDB. ***\n"
                      "*** Type ':help' for assistance. ***\n";
  }

  StringRef getDumpSource() const { return DumpSource; }

  /// Get the REPLInput object owned by the REPL instance.
  REPLInput &getInput() { return Input; }

  SourceFile &getFileForCodeCompletion() {
    return MostRecentModule->getMainSourceFile(SourceFileKind::REPL);
  }

  /// Responds to a REPL input. Returns true if the repl should continue,
  /// false if it should quit.
  bool handleREPLInput(REPLInputKind inputKind, llvm::StringRef Line) {
    switch (inputKind) {
      case REPLInputKind::REPLQuit:
        return false;

      case REPLInputKind::Empty:
        return true;

      case REPLInputKind::REPLDirective: {
        // Lex the directive; the first token is always the leading ':'.
        unsigned BufferID =
            CI.getSourceMgr().addMemBufferCopy(Line, "<REPL Input>");
        Lexer L(CI.getASTContext().LangOpts,
                CI.getSourceMgr(), BufferID, nullptr, LexerMode::Swift);
        Token Tok;
        L.lex(Tok);
        assert(Tok.is(tok::colon));

        if (L.peekNextToken().getText() == "help") {
          llvm::outs() << "Available commands:\n"
               "  :quit - quit the interpreter (you can also use :exit "
                   "or Control+D or exit(0))\n"
               "  :autoindent (on|off) - turn on/off automatic indentation of"
                   " bracketed lines\n"
               "  :constraints debug (on|off) - turn on/off the debug "
                   "output for the constraint-based type checker\n"
               "  :dump_ir - dump the LLVM IR generated by the REPL\n"
               "  :dump_decl <name> - dump the AST representation of the "
                   "named declarations\n"
               "  :dump_source - dump the user input (ignoring"
                   " lines with errors)\n"
               "  :print_decl <name> - print the AST representation of the "
                   "named declarations\n"
               "  :print_module <name> - print the decls in the given "
                   "module, but not submodules\n"
               "API documentation etc. will be here eventually.\n";
        } else if (L.peekNextToken().getText() == "quit" ||
                   L.peekNextToken().getText() == "exit") {
          return false;
        } else if (L.peekNextToken().getText() == "dump_ir") {
          DumpModule.print(llvm::dbgs(), nullptr, false, true);
        } else if (L.peekNextToken().getText() == "dump_decl" ||
                   L.peekNextToken().getText() == "print_decl") {
          PrintOrDump doPrint = (L.peekNextToken().getText() == "print_decl")
            ? PrintOrDump::Print : PrintOrDump::Dump;
          L.lex(Tok);
          L.lex(Tok);
          ASTContext &ctx = CI.getASTContext();
          SourceFile &SF =
              MostRecentModule->getMainSourceFile(SourceFileKind::REPL);
          UnqualifiedLookup lookup(ctx.getIdentifier(Tok.getText()), &SF,
                                   nullptr);
          for (auto result : lookup.Results) {
            printOrDumpDecl(result.getValueDecl(), doPrint);

            if (auto typeDecl = dyn_cast<TypeDecl>(result.getValueDecl())) {
              if (auto typeAliasDecl = dyn_cast<TypeAliasDecl>(typeDecl)) {
                // For a typealias, also show the underlying nominal type.
                TypeDecl *origTypeDecl = typeAliasDecl
                  ->getDeclaredInterfaceType()
                  ->getDesugaredType()
                  ->getNominalOrBoundGenericNominal();
                if (origTypeDecl) {
                  printOrDumpDecl(origTypeDecl, doPrint);
                  typeDecl = origTypeDecl;
                }
              }

              // Print extensions.
              if (auto nominal = dyn_cast<NominalTypeDecl>(typeDecl)) {
                for (auto extension : nominal->getExtensions()) {
                  printOrDumpDecl(extension, doPrint);
                }
              }
            }
          }
        } else if (L.peekNextToken().getText() == "dump_source") {
          llvm::errs() << DumpSource;
        } else if (L.peekNextToken().getText() == "print_module") {
          L.lex(Tok);
          SmallVector<ImportDecl::AccessPathElement, 4> accessPath;
          ASTContext &ctx = CI.getASTContext();

          L.lex(Tok);
          if (Tok.is(tok::identifier)) {
            accessPath.push_back({ctx.getIdentifier(Tok.getText()),
                                  Tok.getLoc()});

            // Accept dotted submodule paths: Name(.Name)*
            while (L.peekNextToken().is(tok::period)) {
              L.lex(Tok);
              L.lex(Tok);
              if (Tok.is(tok::identifier)) {
                accessPath.push_back({ctx.getIdentifier(Tok.getText()),
                                      Tok.getLoc()});
              } else {
                llvm::outs() << "Not a submodule name: '" << Tok.getText()
                             << "'\n";
                accessPath.clear();
              }
            }
          } else {
            llvm::outs() << "Not a module name: '" << Tok.getText() << "'\n";
          }

          if (!accessPath.empty()) {
            auto M = ctx.getModule(accessPath);
            if (!M)
              llvm::outs() << "No such module\n";
            else {
              SmallVector<Decl *, 64> decls;
              M->getDisplayDecls(decls);
              for (const Decl *D : decls) {
                D->print(llvm::outs());
                llvm::outs() << '\n';
              }
            }
          }
        } else if (L.peekNextToken().getText() == "constraints") {
          L.lex(Tok);
          L.lex(Tok);
          if (Tok.getText() == "debug") {
            L.lex(Tok);
            if (Tok.getText() == "on") {
              CI.getASTContext().LangOpts.DebugConstraintSolver = true;
            } else if (Tok.getText() == "off") {
              CI.getASTContext().LangOpts.DebugConstraintSolver = false;
            } else {
              llvm::outs() << "Unknown :constraints debug command; try :help\n";
            }
          } else {
            llvm::outs() << "Unknown :constraints command; try :help\n";
          }
        } else if (L.peekNextToken().getText() == "autoindent") {
          L.lex(Tok);
          L.lex(Tok);
          if (Tok.getText() == "on") {
            Input.Autoindent = true;
          } else if (Tok.getText() == "off") {
            Input.Autoindent = false;
          } else {
            llvm::outs() << "Unknown :autoindent command; try :help\n";
          }
        } else {
          llvm::outs() << "Unknown interpreter escape; try :help\n";
        }

        return true;
      }

      case REPLInputKind::SourceCode: {
        // Execute this source line.
        return executeSwiftSource(Line, CmdLine);
      }
    }
  }
};

inline SourceFile &REPLInput::getFileForCodeCompletion() {
  return Env.getFileForCodeCompletion();
}

void PrettyStackTraceREPL::print(llvm::raw_ostream &out) const {
  out << "while processing REPL source:\n";
  out << Input.Env.getDumpSource();
  llvm::outs().resetColor();
  llvm::errs().resetColor();
}

/// Entry point: run the integrated REPL loop until quit or fatal error.
void swift::runREPL(CompilerInstance &CI, const ProcessCmdLine &CmdLine,
                    bool ParseStdlib) {
  REPLEnvironment env(CI, CmdLine, getGlobalLLVMContext(), ParseStdlib);
  if (CI.getASTContext().hadError())
    return;

  llvm::SmallString<80> Line;
  REPLInputKind inputKind;
  do {
    inputKind = env.getInput().getREPLInput(Line);
  } while (env.handleREPLInput(inputKind, Line));
}

#else

void swift::runREPL(CompilerInstance &CI, const ProcessCmdLine &CmdLine,
                    bool ParseStdlib) {
  // Disable the REPL on other platforms; our current implementation is tied
  // to histedit.h.
  llvm::report_fatal_error("Compiler-internal integrated REPL unimplemented "
                           "for this platform; use the LLDB-enhanced REPL "
                           "instead.");
}

#endif
#include "askpassphrasedialog.h"
#include "ui_askpassphrasedialog.h"

#include "guiconstants.h"
#include "walletmodel.h"

#include <QMessageBox>
#include <QPushButton>
#include <QKeyEvent>

// Global flag (defined elsewhere): when true, an unlocked wallet may only be
// used for staking, not for sending coins. Read/written by this dialog.
extern bool fWalletUnlockStakingOnly;

// Constructor: builds the dialog for one of the modes (Encrypt, UnlockStaking,
// Unlock, Decrypt, ChangePass) and hides/retitles the passphrase fields
// accordingly. The wallet model is supplied later via setModel().
AskPassphraseDialog::AskPassphraseDialog(Mode mode, QWidget *parent) :
    QDialog(parent),
    ui(new Ui::AskPassphraseDialog),
    mode(mode),
    model(0),
    fCapsLock(false)
{
    ui->setupUi(this);

    // Cap all three passphrase inputs at the wallet's maximum passphrase length.
    ui->passEdit1->setMaxLength(MAX_PASSPHRASE_SIZE);
    ui->passEdit2->setMaxLength(MAX_PASSPHRASE_SIZE);
    ui->passEdit3->setMaxLength(MAX_PASSPHRASE_SIZE);

    // Setup Caps Lock detection.
    // The event filter (see eventFilter() below) inspects key events on the
    // password fields to warn the user when Caps Lock appears to be on.
    ui->passEdit1->installEventFilter(this);
    ui->passEdit2->installEventFilter(this);
    ui->passEdit3->installEventFilter(this);

    ui->stakingCheckBox->setChecked(fWalletUnlockStakingOnly);

    switch(mode)
    {
        case Encrypt: // Ask passphrase x2
            // No "old passphrase" field is needed when encrypting for the first time.
            ui->passLabel1->hide();
            ui->passEdit1->hide();
            ui->warningLabel->setText(tr("Enter the new passphrase to the wallet.<br/>Please use a passphrase of <b>ten or more random characters</b>, or <b>eight or more words</b>."));
            setWindowTitle(tr("Encrypt wallet"));
            break;
        case UnlockStaking:
            ui->stakingCheckBox->setChecked(true);
            ui->stakingCheckBox->show();
            // fallthru
        case Unlock: // Ask passphrase
            ui->warningLabel->setText(tr("This operation needs your wallet passphrase to unlock the wallet."));
            ui->passLabel2->hide();
            ui->passEdit2->hide();
            ui->passLabel3->hide();
            ui->passEdit3->hide();
            setWindowTitle(tr("Unlock wallet"));
            break;
        case Decrypt: // Ask passphrase
            ui->warningLabel->setText(tr("This operation needs your wallet passphrase to decrypt the wallet."));
            ui->passLabel2->hide();
            ui->passEdit2->hide();
            ui->passLabel3->hide();
            ui->passEdit3->hide();
            setWindowTitle(tr("Decrypt wallet"));
            break;
        case ChangePass: // Ask old passphrase + new passphrase x2
            setWindowTitle(tr("Change passphrase"));
            ui->warningLabel->setText(tr("Enter the old and new passphrase to the wallet."));
            break;
    }

    // Initialize the OK button's enabled state, then keep it in sync with the
    // input fields.
    textChanged();
    connect(ui->passEdit1, SIGNAL(textChanged(QString)), this, SLOT(textChanged()));
    connect(ui->passEdit2, SIGNAL(textChanged(QString)), this, SLOT(textChanged()));
    connect(ui->passEdit3, SIGNAL(textChanged(QString)), this, SLOT(textChanged()));
}

// Destructor: scrubs any typed passphrases from the widgets before tearing
// down the UI.
AskPassphraseDialog::~AskPassphraseDialog()
{
    secureClearPassFields();
    delete ui;
}

// Attach the wallet model that performs the actual encrypt/unlock/decrypt/
// change-passphrase operations. Must be called before the dialog is accepted.
void AskPassphraseDialog::setModel(WalletModel *model)
{
    this->model = model;
}

// OK-button handler: reads the passphrase fields, clears them, and performs
// the operation selected by 'mode' against the wallet model, reporting
// success or failure via message boxes.
void AskPassphraseDialog::accept()
{
    SecureString oldpass, newpass1, newpass2;
    if(!model)
        return;
    // Reserve up front so assignment below does not reallocate (SecureString
    // storage is mlock()'d / scrubbed).
    oldpass.reserve(MAX_PASSPHRASE_SIZE);
    newpass1.reserve(MAX_PASSPHRASE_SIZE);
    newpass2.reserve(MAX_PASSPHRASE_SIZE);
    // TODO: get rid of this .c_str() by implementing SecureString::operator=(std::string)
    // Alternately, find a way to make this input mlock()'d to begin with.
    oldpass.assign(ui->passEdit1->text().toStdString().c_str());
    newpass1.assign(ui->passEdit2->text().toStdString().c_str());
    newpass2.assign(ui->passEdit3->text().toStdString().c_str());

    // Wipe the on-screen copies as soon as we have taken them.
    secureClearPassFields();

    switch(mode)
    {
    case Encrypt: {
        if(newpass1.empty() || newpass2.empty())
        {
            // Cannot encrypt with empty passphrase
            break;
        }
        QMessageBox::StandardButton retval = QMessageBox::question(this, tr("Confirm wallet encryption"),
                 tr("Warning: If you encrypt your wallet and lose your passphrase, you will <b>LOSE ALL OF YOUR COINS</b>!") + "<br><br>" + tr("Are you sure you wish to encrypt your wallet?"),
                 QMessageBox::Yes|QMessageBox::Cancel,
                 QMessageBox::Cancel);
        if(retval == QMessageBox::Yes)
        {
            if(newpass1 == newpass2)
            {
                if(model->setWalletEncrypted(true, newpass1))
                {
                    // The application quits after encryption so the wallet is
                    // reopened in its encrypted state.
                    QMessageBox::warning(this, tr("Wallet encrypted"),
                                         "<qt>" +
                                         tr("c0fferCoin will close now to finish the encryption process. "
                                         "Remember that encrypting your wallet cannot fully protect "
                                         "your coins from being stolen by malware infecting your computer.") +
                                         "<br><br><b>" +
                                         tr("IMPORTANT: Any previous backups you have made of your wallet file "
                                         "should be replaced with the newly generated, encrypted wallet file. "
                                         "For security reasons, previous backups of the unencrypted wallet file "
                                         "will become useless as soon as you start using the new, encrypted wallet.") +
                                         "</b></qt>");
                    QApplication::quit();
                }
                else
                {
                    QMessageBox::critical(this, tr("Wallet encryption failed"),
                                         tr("Wallet encryption failed due to an internal error. Your wallet was not encrypted."));
                }
                // NOTE(review): accept() is reached even when setWalletEncrypted()
                // failed above, so the dialog closes as "accepted" on failure —
                // confirm this is intentional (on success the app quits anyway).
                QDialog::accept(); // Success
            }
            else
            {
                QMessageBox::critical(this, tr("Wallet encryption failed"),
                                     tr("The supplied passphrases do not match."));
            }
        }
        else
        {
            QDialog::reject(); // Cancelled
        }
        } break;
    case UnlockStaking:
    case Unlock:
        if(!model->setWalletLocked(false, oldpass))
        {
            QMessageBox::critical(this, tr("Wallet unlock failed"),
                                  tr("The passphrase entered for the wallet decryption was incorrect."));
        }
        else
        {
            // Record whether the unlock is restricted to staking only.
            fWalletUnlockStakingOnly = ui->stakingCheckBox->isChecked();
            QDialog::accept(); // Success
        }
        break;
    case Decrypt:
        if(!model->setWalletEncrypted(false, oldpass))
        {
            QMessageBox::critical(this, tr("Wallet decryption failed"),
                                  tr("The passphrase entered for the wallet decryption was incorrect."));
        }
        else
        {
            QDialog::accept(); // Success
        }
        break;
    case ChangePass:
        if(newpass1 == newpass2)
        {
            if(model->changePassphrase(oldpass, newpass1))
            {
                QMessageBox::information(this, tr("Wallet encrypted"),
                                     tr("Wallet passphrase was successfully changed."));
                QDialog::accept(); // Success
            }
            else
            {
                QMessageBox::critical(this, tr("Wallet encryption failed"),
                                      tr("The passphrase entered for the wallet decryption was incorrect."));
            }
        }
        else
        {
            QMessageBox::critical(this, tr("Wallet encryption failed"),
                                 tr("The supplied passphrases do not match."));
        }
        break;
    }
}

// Slot: re-evaluates which fields are required for the current mode and
// enables the OK button only when all of them are non-empty.
void AskPassphraseDialog::textChanged()
{
    // Validate input, set Ok button to enabled when acceptable
    bool acceptable = false;
    switch(mode)
    {
    case Encrypt: // New passphrase x2
        acceptable = !ui->passEdit2->text().isEmpty() && !ui->passEdit3->text().isEmpty();
        break;
    case UnlockStaking:
    case Unlock: // Old passphrase x1
    case Decrypt:
        acceptable = !ui->passEdit1->text().isEmpty();
        break;
    case ChangePass: // Old passphrase x1, new passphrase x2
        acceptable = !ui->passEdit1->text().isEmpty() && !ui->passEdit2->text().isEmpty() && !ui->passEdit3->text().isEmpty();
        break;
    }
    ui->buttonBox->button(QDialogButtonBox::Ok)->setEnabled(acceptable);
}

// Tracks Caps Lock presses delivered directly to the dialog itself.
bool AskPassphraseDialog::event(QEvent *event)
{
    // Detect Caps Lock key press.
    // NOTE(review): toggling on every Key_CapsLock press can drift out of sync
    // with the real LED state if the key is pressed while another window has
    // focus; the heuristic in eventFilter() below re-synchronizes it.
    if (event->type() == QEvent::KeyPress) {
        QKeyEvent *ke = static_cast<QKeyEvent *>(event);
        if (ke->key() == Qt::Key_CapsLock) {
            fCapsLock = !fCapsLock;
        }
        if (fCapsLock) {
            ui->capsLabel->setText(tr("Warning: The Caps Lock key is on!"));
        } else {
            ui->capsLabel->clear();
        }
    }
    // NOTE(review): forwards to QWidget::event rather than QDialog::event —
    // verify that skipping QDialog's own event handling is intended here.
    return QWidget::event(event);
}

// Event filter installed on the passphrase line edits; infers the Caps Lock
// state from the case of typed letters relative to the Shift modifier.
bool AskPassphraseDialog::eventFilter(QObject *object, QEvent *event)
{
    /* Detect Caps Lock.
     * There is no good OS-independent way to check a key state in Qt, but we
     * can detect Caps Lock by checking for the following condition:
     * Shift key is down and the result is a lower case character, or
     * Shift key is not down and the result is an upper case character.
     */
    if (event->type() == QEvent::KeyPress) {
        QKeyEvent *ke = static_cast<QKeyEvent *>(event);
        QString str = ke->text();
        if (str.length() != 0) {
            const QChar *psz = str.unicode();
            bool fShift = (ke->modifiers() & Qt::ShiftModifier) != 0;
            if ((fShift && psz->isLower()) || (!fShift && psz->isUpper())) {
                fCapsLock = true;
                ui->capsLabel->setText(tr("Warning: The Caps Lock key is on!"));
            } else if (psz->isLetter()) {
                fCapsLock = false;
                ui->capsLabel->clear();
            }
        }
    }
    return QDialog::eventFilter(object, event);
}

// Best-effort scrubbing of the passphrase widgets: overwrite each field's
// text with spaces of equal length before clearing, so the original
// characters do not linger in the widgets' internal buffers.
void AskPassphraseDialog::secureClearPassFields()
{
    // Attempt to overwrite text so that they do not linger around in memory
    ui->passEdit1->setText(QString(" ").repeated(ui->passEdit1->text().size()));
    ui->passEdit2->setText(QString(" ").repeated(ui->passEdit2->text().size()));
    ui->passEdit3->setText(QString(" ").repeated(ui->passEdit3->text().size()));
    ui->passEdit1->clear();
    ui->passEdit2->clear();
    ui->passEdit3->clear();
}
/* Open Asset Import Library (assimp) ---------------------------------------------------------------------- Copyright (c) 2006-2021, assimp team All rights reserved. Redistribution and use of this software in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the assimp team, nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission of the assimp team. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
----------------------------------------------------------------------
*/

/** @file DefaultLogger.hpp
 *  @brief Declares Assimp's primary (singleton) logging facility, #DefaultLogger.
 */
#ifndef INCLUDED_AI_DEFAULTLOGGER
#define INCLUDED_AI_DEFAULTLOGGER

#include "LogStream.hpp"
#include "Logger.hpp"
#include "NullLogger.hpp"

#include <vector>

namespace Assimp {
// ------------------------------------------------------------------------------------
class IOStream;
struct LogStreamInfo;

/** default name of logfile */
#define ASSIMP_DEFAULT_LOG_NAME "AssimpLog.txt"

// ------------------------------------------------------------------------------------
/** @brief CPP-API: Primary logging facility of Assimp.
 *
 *  The library stores its primary #Logger as a static member of this class.
 *  #get() returns this primary logger. By default the underlying implementation is
 *  just a #NullLogger which rejects all log messages. By calling #create(), logging
 *  is turned on. To capture the log output multiple log streams (#LogStream) can be
 *  attached to the logger. Some default streams for common streaming locations (such as
 *  a file, std::cout, OutputDebugString()) are also provided.
 *
 *  If you wish to customize the logging at an even deeper level supply your own
 *  implementation of #Logger to #set().
 *  @note The whole logging stuff causes a small extra overhead for all imports. */
class ASSIMP_API DefaultLogger : public Logger {
public:
    // ----------------------------------------------------------------------
    /** @brief Creates a logging instance.
     *  @param name       Name for log file. Only valid in combination
     *    with the aiDefaultLogStream_FILE flag.
     *  @param severity   Log severity, DEBUG turns on debug messages and VERBOSE turns on all messages.
     *  @param defStreams Default log streams to be attached. Any bitwise
     *    combination of the aiDefaultLogStream enumerated values.
     *    If #aiDefaultLogStream_FILE is specified but an empty string is
     *    passed for 'name', no log file is created at all.
     *  @param io         IOSystem to be used to open external files (such as the
     *   log file). Pass nullptr to rely on the default implementation.
     *  This replaces the default #NullLogger with a #DefaultLogger instance. */
    static Logger *create(const char *name = ASSIMP_DEFAULT_LOG_NAME,
            LogSeverity severity = NORMAL,
            unsigned int defStreams = aiDefaultLogStream_DEBUGGER | aiDefaultLogStream_FILE,
            IOSystem *io = nullptr);

    // ----------------------------------------------------------------------
    /** @brief Setup a custom #Logger implementation.
     *
     *  Use this if the provided #DefaultLogger class doesn't fit into
     *  your needs. If the provided message formatting is OK for you,
     *  it's much easier to use #create() and to attach your own custom
     *  output streams to it.
     *  @param logger Pass NULL to setup a default NullLogger*/
    static void set(Logger *logger);

    // ----------------------------------------------------------------------
    /** @brief Getter for singleton instance
     *  @return Only instance. This is never null, but it could be a
     *  NullLogger. Use isNullLogger to check this.*/
    static Logger *get();

    // ----------------------------------------------------------------------
    /** @brief Return whether a #NullLogger is currently active
     *  @return true if the current logger is a #NullLogger.
     *  Use create() or set() to setup a logger that does actually do
     *  something else than just rejecting all log messages. */
    static bool isNullLogger();

    // ----------------------------------------------------------------------
    /** @brief Kills the current singleton logger and replaces it with a
     *  #NullLogger instance. */
    static void kill();

    // ----------------------------------------------------------------------
    /** @copydoc Logger::attachStream */
    bool attachStream(LogStream *pStream, unsigned int severity);

    // ----------------------------------------------------------------------
    /** @copydoc Logger::detachStream */
    bool detachStream(LogStream *pStream, unsigned int severity);

private:
    // ----------------------------------------------------------------------
    /** @brief Private construction for internal use by create().
     *  @param severity Logging granularity */
    explicit DefaultLogger(LogSeverity severity);

    // ----------------------------------------------------------------------
    /** @brief Destructor */
    ~DefaultLogger();

    /** @brief Logs debug infos, only been written when severity level DEBUG or higher is set */
    void OnDebug(const char *message);

    /** @brief Logs debug infos, only been written when severity level VERBOSE is set */
    void OnVerboseDebug(const char *message);

    /** @brief Logs an info message */
    void OnInfo(const char *message);

    /** @brief Logs a warning message */
    void OnWarn(const char *message);

    /** @brief Logs an error message */
    void OnError(const char *message);

    // ----------------------------------------------------------------------
    /** @brief Writes a message to all streams */
    void WriteToStreams(const char *message, ErrorSeverity ErrorSev);

    // ----------------------------------------------------------------------
    /** @brief Returns the thread id.
     *  @note This is an OS specific feature, if not supported, a
     *    zero will be returned. */
    unsigned int GetThreadID();

private:
    // Aliases for stream container
    typedef std::vector<LogStreamInfo *> StreamArray;
    typedef std::vector<LogStreamInfo *>::iterator StreamIt;
    typedef std::vector<LogStreamInfo *>::const_iterator ConstStreamIt;

    //! only logging instance
    static Logger *m_pLogger;
    static NullLogger s_pNullLogger;
    //! Attached streams
    StreamArray m_StreamArray;
    //! true while the last message is suppressed as a duplicate (see lastMsg)
    bool noRepeatMsg;
    //! copy of the most recently emitted message, used to detect repeats
    char lastMsg[MAX_LOG_MESSAGE_LENGTH * 2];
    //! length of the message stored in lastMsg
    size_t lastLen;
};
// ------------------------------------------------------------------------------------

} // Namespace Assimp

#endif // !! INCLUDED_AI_DEFAULTLOGGER
//================================================================================================== /* EVE - Expressive Vector Engine Copyright : EVE Contributors & Maintainers SPDX-License-Identifier: MIT */ //================================================================================================== #pragma once #include <eve/module/core/regular/interleave.hpp>
//-------------------------------------------------------------------------------------------------------
// Copyright (C) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
//-------------------------------------------------------------------------------------------------------
#include "RuntimeLibraryPch.h"
#include "Debug/DiagHelperMethodWrapper.h"
#include "Library/ThrowErrorObject.h"

namespace Js
{
    // In some cases we delay throw from helper methods and return ThrowErrorObject instead which we call and throw later.
    // Then the exception is actually thrown when we call this method.
    //
    // Entry point invoked when a ThrowErrorObject is called as a function: it
    // (re)throws the JavascriptError captured in m_error. Under a debugger,
    // the throw is routed through the helper-method exception wrapper so the
    // debugger's built-in/helper exception handling sees it.
    Var ThrowErrorObject::DefaultEntryPoint(RecyclableObject* function, CallInfo callInfo, ...)
    {
        ARGUMENTS(args, callInfo);
        ScriptContext* scriptContext = function->GetScriptContext();
        ThrowErrorObject* throwErrorObject = ThrowErrorObject::FromVar(function);

        bool useExceptionWrapper = scriptContext->IsScriptContextInDebugMode()
            /* Check for script context is intentional as library code also uses exception wrapper */
            && (ScriptContext::IsExceptionWrapperForBuiltInsEnabled(scriptContext) || ScriptContext::IsExceptionWrapperForHelpersEnabled(scriptContext))
            && !AutoRegisterIgnoreExceptionWrapper::IsRegistered(scriptContext->GetThreadContext());

        if (useExceptionWrapper)
        {
            // Forward the throw via regular try-catch wrapper logic that we use for helper/library calls.
            AutoRegisterIgnoreExceptionWrapper autoWrapper(scriptContext->GetThreadContext());
            Var ret = HelperOrLibraryMethodWrapper<true>(scriptContext, [throwErrorObject, scriptContext]() -> Var {
                JavascriptExceptionOperators::Throw(throwErrorObject->m_error, scriptContext);
            });
            return ret;
        }
        else
        {
            // Throw does not return; no value is produced on this path.
            JavascriptExceptionOperators::Throw(throwErrorObject->m_error, scriptContext);
        }
    }

    // Wraps an already-constructed JavascriptError for deferred throwing.
    ThrowErrorObject::ThrowErrorObject(StaticType* type, JavascriptError* error)
        : RecyclableObject(type), m_error(error)
    {
    }

    // Factory: allocates a ThrowErrorObject on the recycler heap.
    ThrowErrorObject* ThrowErrorObject::New(StaticType* type, JavascriptError* error, Recycler* recycler)
    {
        return RecyclerNew(recycler, ThrowErrorObject, type, error);
    }

    // Type test used by FromVar.
    // NOTE(review): this checks TypeIds_Undefined — presumably ThrowErrorObject
    // instances are created with the Undefined type id so that they masquerade
    // as 'undefined' to script; confirm against the StaticType passed to New().
    bool ThrowErrorObject::Is(Var aValue)
    {
        return JavascriptOperators::GetTypeId(aValue) == TypeIds_Undefined;
    }

    // Checked downcast (asserts in debug builds via Is()).
    ThrowErrorObject* ThrowErrorObject::FromVar(Var aValue)
    {
        Assert(Is(aValue));
        return static_cast<ThrowErrorObject*>(RecyclableObject::FromVar(aValue));
    }

    // Builds a ThrowErrorObject carrying an error of the kind produced by
    // 'createError' (a JavascriptLibrary member-function pointer), with its
    // message formatted from the resource id 'hCode' and 'varName'.
    RecyclableObject* ThrowErrorObject::CreateThrowErrorObject(CreateErrorFunc createError, ScriptContext* scriptContext, int32 hCode, PCWSTR varName)
    {
        JavascriptLibrary* library = scriptContext->GetLibrary();
        JavascriptError *pError = (library->*createError)();
        JavascriptError::SetErrorMessage(pError, hCode, varName, scriptContext);
        return library->CreateThrowErrorObject(pError);
    }

    // Convenience overload for TypeError with a wide-string variable name.
    RecyclableObject* ThrowErrorObject::CreateThrowTypeErrorObject(ScriptContext* scriptContext, int32 hCode, PCWSTR varName)
    {
        return CreateThrowErrorObject(&JavascriptLibrary::CreateTypeError, scriptContext, hCode, varName);
    }

    // Convenience overload for TypeError with a JavascriptString variable name.
    RecyclableObject* ThrowErrorObject::CreateThrowTypeErrorObject(ScriptContext* scriptContext, int32 hCode, JavascriptString* varName)
    {
        return CreateThrowTypeErrorObject(scriptContext, hCode, varName->GetSz());
    }
}
#include "CUPOT.h"

#if ( MODEL == HYDRO && defined GRAVITY )



// include c_ExtAcc_AuxArray[]
#ifdef __CUDACC__
#include "CUDA_ConstMemory.h"
#endif




//-----------------------------------------------------------------------------------------
// Function    :  CPU/CUPOT_HydroGravitySolver
// Description :  Advances the momentum and energy density of a group of patches by gravitational acceleration
//                (including external gravity)
//
// Note        :  1. Currently this function does NOT ensure the consistency between internal energy and
//                   dual-energy variable (e.g., entropy)
//                   --> This consistency breaks only for cells with the dual-energy status labelled
//                       as DE_UPDATED_BY_ETOT_GRA
//                   --> We restore this consistency in Gra_Close()
//                2. Arrays with a prefix "g_" are stored in the global memory of GPU
//                3. The same source compiles either as a CUDA kernel (__CUDACC__) or as an
//                   OpenMP-parallelized CPU routine; the two differ only in how patches are
//                   distributed (thread blocks vs. loop iterations) and in where the
//                   external-acceleration auxiliary array lives
//
// Parameter   :  g_Flu_Array_New   : Array to store the input and output fluid variables
//                g_Pot_Array_New   : Array storing the input potential (at the current step)
//                                    --> _New: to be distinguishable from g_Pot_Array_USG[], which is defined at the previous step
//                g_Corner_Array    : Array storing the physical corner coordinates of each patch
//                g_Pot_Array_USG   : Array storing the input potential for UNSPLIT_GRAVITY (at the previous step)
//                g_Flu_Array_USG   : Array storing the input density + momentum for UNSPLIT_GRAVITY (at the previous step)
//                g_DE_Array        : Array storing the dual-energy status (for both input and output)
//                g_Emag_Array      : Array storing the cell-centered magnetic energy
//                                    --> Only for checking minimum internal energy in MHD
//                NPatchGroup       : Number of input patch groups (for CPU only)
//                dt                : Time interval to advance solution
//                dh                : Cell size
//                P5_Gradient       : Use 5-points stencil to evaluate the potential gradient
//                UsePot            : Add self-gravity and/or external potential
//                ExtAcc            : Add external acceleration
//                ExtAcc_Func       : Function pointer to the external acceleration routine (for both CPU and GPU)
//                c_ExtAcc_AuxArray : Auxiliary array for adding external acceleration (for CPU only)
//                                    --> When using GPU, this array is stored in the constant memory header
//                                        CUDA_ConstMemory.h and does not need to be passed as a function argument
//                TimeNew           : Physical time at the current step (for the external gravity solver)
//                TimeOld           : Physical time at the previous step (for the external gravity solver in UNSPLIT_GRAVITY)
//                MinEint           : Internal energy floor
//
// Return      :  g_Flu_Array_New, g_DE_Array
//-----------------------------------------------------------------------------------------
#ifdef __CUDACC__
__global__
void CUPOT_HydroGravitySolver(
         real g_Flu_Array_New[][GRA_NIN][ CUBE(PS1) ],
   const real g_Pot_Array_New[][ CUBE(GRA_NXT) ],
   const double g_Corner_Array [][3],
   const real g_Pot_Array_USG[][ CUBE(USG_NXT_G) ],
   const real g_Flu_Array_USG[][GRA_NIN-1][ CUBE(PS1) ],
         char g_DE_Array [][ CUBE(PS1) ],
   const real g_Emag_Array [][ CUBE(PS1) ],
   const real dt, const real dh, const bool P5_Gradient,
   const bool UsePot, const OptExtAcc_t ExtAcc, const ExtAcc_t ExtAcc_Func,
   const double TimeNew, const double TimeOld, const real MinEint )
#else
void CPU_HydroGravitySolver(
         real g_Flu_Array_New[][GRA_NIN][ CUBE(PS1) ],
   const real g_Pot_Array_New[][ CUBE(GRA_NXT) ],
   const double g_Corner_Array [][3],
   const real g_Pot_Array_USG[][ CUBE(USG_NXT_G) ],
   const real g_Flu_Array_USG[][GRA_NIN-1][ CUBE(PS1) ],
         char g_DE_Array [][ CUBE(PS1) ],
   const real g_Emag_Array [][ CUBE(PS1) ],
   const int NPatchGroup,
   const real dt, const real dh, const bool P5_Gradient,
   const bool UsePot, const OptExtAcc_t ExtAcc, const ExtAcc_t ExtAcc_Func,
   const double c_ExtAcc_AuxArray[],
   const double TimeNew, const double TimeOld, const real MinEint )
#endif
{

// check
// (debug-only sanity checks on arguments; printf works on both CPU and device)
#  ifdef GAMER_DEBUG
   if ( ExtAcc  &&  TimeNew < 0.0 )
      printf( "ERROR : incorrect TimeNew (%14.7e) !!\n", TimeNew );

#  ifdef UNSPLIT_GRAVITY
   if ( g_Flu_Array_USG == NULL )
      printf( "ERROR : g_Flu_Array_USG == NULL !!\n" );

   if ( UsePot  &&  g_Pot_Array_USG == NULL )
      printf( "ERROR : g_Pot_Array_USG == NULL !!\n" );

   if ( ExtAcc  &&  ( TimeOld >= TimeNew || TimeOld < 0.0 )  )
      printf( "ERROR : incorrect time (TimeOld %14.7e, TimeNew = %14.7e) !!\n", TimeOld, TimeNew );
#  endif

#  ifdef DUAL_ENERGY
   if ( g_DE_Array == NULL )
      printf( "ERROR : g_DE_Array == NULL !!\n" );
#  endif

#  ifdef MHD
   if ( g_Emag_Array == NULL )
      printf( "ERROR : g_Emag_Array == NULL !!\n" );
#  endif
#  endif // #ifdef GAMER_DEBUG


// Gra_Const folds dt and the finite-difference denominator into one factor:
// -dt/(12*dh) for the 5-point stencil, -dt/(2*dh) for the 3-point stencil
// (negative sign because acceleration = -grad(potential))
   const real Gra_Const   = ( P5_Gradient ) ? -dt/(12.0*dh) : -dt/(2.0*dh);
   const int  PS1_sqr     = SQR(PS1);
// strides (in cells) along x/y/z in the potential arrays, which include ghost zones
   const int  didx_new[3] = { 1, GRA_NXT, SQR(GRA_NXT) };
#  ifdef UNSPLIT_GRAVITY
   const int  didx_old[3] = { 1, USG_NXT_G, SQR(USG_NXT_G) };
#  endif


// load potential from global to shared memory to improve the GPU performance
#  ifdef __CUDACC__
   __shared__ real s_pot_new[ CUBE(GRA_NXT) ];
#  ifdef UNSPLIT_GRAVITY
   __shared__ real s_pot_old[ CUBE(USG_NXT_G) ];
#  endif

   if ( UsePot )
   {
      for (int t=threadIdx.x; t<CUBE(GRA_NXT); t+=GRA_BLOCK_SIZE)
         s_pot_new[t] = g_Pot_Array_New[blockIdx.x][t];

#     ifdef UNSPLIT_GRAVITY
      for (int t=threadIdx.x; t<CUBE(USG_NXT_G); t+=GRA_BLOCK_SIZE)
         s_pot_old[t] = g_Pot_Array_USG[blockIdx.x][t];
#     endif
   }
   __syncthreads();
#  endif // #ifdef __CUDACC__


// loop over all patches
// --> CPU/GPU solver: use different (OpenMP threads) / (CUDA thread blocks)
//     to work on different patches
#  ifdef __CUDACC__
   const int P = blockIdx.x;
#  else
#  pragma omp parallel for schedule( runtime )
   for (int P=0; P<NPatchGroup*8; P++)
#  endif
   {
//    point to the potential array of the target patch
#     ifdef __CUDACC__
      const real *const pot_new = s_pot_new;
#     ifdef UNSPLIT_GRAVITY
      const real *const pot_old = s_pot_old;
#     endif
#     else // #ifdef __CUDACC__
      const real *const pot_new = g_Pot_Array_New[P];
#     ifdef UNSPLIT_GRAVITY
      const real *const pot_old = g_Pot_Array_USG[P];
#     endif
#     endif // #ifdef __CUDACC__ ... else ...


//    loop over all cells of the target patch
//    _g0: indices for the arrays without any ghost zone
      CGPU_LOOP( idx_g0, CUBE(PS1) )
      {
//       Enki = non-kinetic energy (i.e. Etot - Ekin)
         real acc_new[3]={0.0, 0.0, 0.0}, px_new, py_new, pz_new, rho_new, Enki_in, Ekin_out, Etot_in, Etot_out, _rho2;
#        ifdef UNSPLIT_GRAVITY
         real acc_old[3]={0.0, 0.0, 0.0}, px_old, py_old, pz_old, rho_old, Emag_in=0.0;
#        endif

//       decompose the 1D cell index into (i,j,k) within the ghost-zone-free patch
         const int i_g0    = idx_g0 % PS1;
         const int j_g0    = idx_g0 % PS1_sqr / PS1;
         const int k_g0    = idx_g0 / PS1_sqr;

//       shift by the ghost-zone size to index into the potential arrays
         const int i_new   = i_g0 + GRA_GHOST_SIZE;
         const int j_new   = j_g0 + GRA_GHOST_SIZE;
         const int k_new   = k_g0 + GRA_GHOST_SIZE;
         const int idx_new = IDX321( i_new, j_new, k_new, GRA_NXT, GRA_NXT );

#        ifdef UNSPLIT_GRAVITY
         const int i_old   = i_g0 + USG_GHOST_SIZE_G;
         const int j_old   = j_g0 + USG_GHOST_SIZE_G;
         const int k_old   = k_g0 + USG_GHOST_SIZE_G;
         const int idx_old = IDX321( i_old, j_old, k_old, USG_NXT_G, USG_NXT_G );
#        endif


//       external acceleration
//       --> evaluated at the cell center; acc_* already carries the factor dt after this block
         if ( ExtAcc )
         {
            double x, y, z;

            x = g_Corner_Array[P][0] + (double)(i_g0*dh);
            y = g_Corner_Array[P][1] + (double)(j_g0*dh);
            z = g_Corner_Array[P][2] + (double)(k_g0*dh);

            ExtAcc_Func( acc_new, x, y, z, TimeNew, c_ExtAcc_AuxArray );
            for (int d=0; d<3; d++)    acc_new[d] *= dt;

#           ifdef UNSPLIT_GRAVITY
            ExtAcc_Func( acc_old, x, y, z, TimeOld, c_ExtAcc_AuxArray );
            for (int d=0; d<3; d++)    acc_old[d] *= dt;
#           endif
         }


//       self-gravity and external potential
//       --> finite-difference gradient of the potential, added on top of any external acceleration
         if ( UsePot )
         {
            const int ip1_new = idx_new + didx_new[0];
            const int jp1_new = idx_new + didx_new[1];
            const int kp1_new = idx_new + didx_new[2];
            const int im1_new = idx_new - didx_new[0];
            const int jm1_new = idx_new - didx_new[1];
            const int km1_new = idx_new - didx_new[2];

#           ifdef UNSPLIT_GRAVITY
            const int ip1_old = idx_old + didx_old[0];
            const int jp1_old = idx_old + didx_old[1];
            const int kp1_old = idx_old + didx_old[2];
            const int im1_old = idx_old - didx_old[0];
            const int jm1_old = idx_old - didx_old[1];
            const int km1_old = idx_old - didx_old[2];
#           endif

            if ( P5_Gradient )   // 5-point (4th-order) central difference
            {
               const real Const_8 = (real)8.0;
               const int  ip2_new = ip1_new + didx_new[0];
               const int  jp2_new = jp1_new + didx_new[1];
               const int  kp2_new = kp1_new + didx_new[2];
               const int  im2_new = im1_new - didx_new[0];
               const int  jm2_new = jm1_new - didx_new[1];
               const int  km2_new = km1_new - didx_new[2];

#              ifdef UNSPLIT_GRAVITY
               const int  ip2_old = ip1_old + didx_old[0];
               const int  jp2_old = jp1_old + didx_old[1];
               const int  kp2_old = kp1_old + didx_old[2];
               const int  im2_old = im1_old - didx_old[0];
               const int  jm2_old = jm1_old - didx_old[1];
               const int  km2_old = km1_old - didx_old[2];
#              endif

               acc_new[0] += Gra_Const*( - pot_new[ip2_new] + Const_8*pot_new[ip1_new] - Const_8*pot_new[im1_new] + pot_new[im2_new] );
               acc_new[1] += Gra_Const*( - pot_new[jp2_new] + Const_8*pot_new[jp1_new] - Const_8*pot_new[jm1_new] + pot_new[jm2_new] );
               acc_new[2] += Gra_Const*( - pot_new[kp2_new] + Const_8*pot_new[kp1_new] - Const_8*pot_new[km1_new] + pot_new[km2_new] );

#              ifdef UNSPLIT_GRAVITY
               acc_old[0] += Gra_Const*( - pot_old[ip2_old] + Const_8*pot_old[ip1_old] - Const_8*pot_old[im1_old] + pot_old[im2_old] );
               acc_old[1] += Gra_Const*( - pot_old[jp2_old] + Const_8*pot_old[jp1_old] - Const_8*pot_old[jm1_old] + pot_old[jm2_old] );
               acc_old[2] += Gra_Const*( - pot_old[kp2_old] + Const_8*pot_old[kp1_old] - Const_8*pot_old[km1_old] + pot_old[km2_old] );
#              endif
            } // if ( P5_Gradient )

            else                 // 3-point (2nd-order) central difference
            {
               acc_new[0] += Gra_Const*( pot_new[ip1_new] - pot_new[im1_new] );
               acc_new[1] += Gra_Const*( pot_new[jp1_new] - pot_new[jm1_new] );
               acc_new[2] += Gra_Const*( pot_new[kp1_new] - pot_new[km1_new] );

#              ifdef UNSPLIT_GRAVITY
               acc_old[0] += Gra_Const*( pot_old[ip1_old] - pot_old[im1_old] );
               acc_old[1] += Gra_Const*( pot_old[jp1_old] - pot_old[jm1_old] );
               acc_old[2] += Gra_Const*( pot_old[kp1_old] - pot_old[km1_old] );
#              endif
            } // if ( P5_Gradient ) ... else ...
         } // if ( UsePot )


//       advance fluid
#        ifdef UNSPLIT_GRAVITY
//       unsplitting update: momentum uses the time-averaged force 0.5*(old+new)

         rho_new = g_Flu_Array_New[P][DENS][idx_g0];
         rho_old = g_Flu_Array_USG[P][DENS][idx_g0];
         px_new  = g_Flu_Array_New[P][MOMX][idx_g0];
         px_old  = g_Flu_Array_USG[P][MOMX][idx_g0];
         py_new  = g_Flu_Array_New[P][MOMY][idx_g0];
         py_old  = g_Flu_Array_USG[P][MOMY][idx_g0];
         pz_new  = g_Flu_Array_New[P][MOMZ][idx_g0];
         pz_old  = g_Flu_Array_USG[P][MOMZ][idx_g0];

//       backup the original non-kinetic energy so that we can restore it later if necessary
         _rho2   = (real)0.5/rho_new;
         Etot_in = g_Flu_Array_New[P][ENGY][idx_g0];
         Enki_in = Etot_in - _rho2*( SQR(px_new) + SQR(py_new) + SQR(pz_new) );
#        ifdef MHD
         Emag_in = g_Emag_Array[P][idx_g0];
#        endif

//       update the momentum density
         px_new += (real)0.5*( rho_old*acc_old[0] + rho_new*acc_new[0] );
         py_new += (real)0.5*( rho_old*acc_old[1] + rho_new*acc_new[1] );
         pz_new += (real)0.5*( rho_old*acc_old[2] + rho_new*acc_new[2] );

         g_Flu_Array_New[P][MOMX][idx_g0] = px_new;
         g_Flu_Array_New[P][MOMY][idx_g0] = py_new;
         g_Flu_Array_New[P][MOMZ][idx_g0] = pz_new;

//       record the updated kinematic energy density
         Ekin_out = _rho2*( SQR(px_new) + SQR(py_new) + SQR(pz_new) );

//       update the total energy density
#        ifdef DUAL_ENERGY

//       for the unsplitting method with the dual-energy formalism, we correct the **total energy density**
//       only if the dual-energy status != DE_UPDATED_BY_DUAL
//       --> for (a) DE_UPDATED_BY_DUAL     --> Eint has been updated by the dual-energy variable
//               (b) DE_UPDATED_BY_MIN_PRES --> Eint has been set to the minimum threshold
//       --> currently for (b) we still update the total energy density
         if ( g_DE_Array[P][idx_g0] == DE_UPDATED_BY_DUAL )
         {
//          fix the internal energy and the dual-energy variable
            Etot_out = Enki_in + Ekin_out;
         }
         else
         {
//          update the total energy, where internal energy and dual-energy variable may change as well
            Etot_out = Etot_in + (real)0.5*( px_old*acc_old[0] + py_old*acc_old[1] + pz_old*acc_old[2] +
                                             px_new*acc_new[0] + py_new*acc_new[1] + pz_new*acc_new[2] );

//          check the minimum internal energy
//###NOTE: assuming Etot = Eint + Ekin + Emag
//          (a) if the updated internal energy is greater than the threshold, set the dual-energy status == DE_UPDATED_BY_ETOT_GRA
            if ( Etot_out - Ekin_out - Emag_in >= MinEint )
               g_DE_Array[P][idx_g0] = DE_UPDATED_BY_ETOT_GRA;

//          (b) otherwise restore the original internal energy and keep the original dual-energy status
            else
               Etot_out = Enki_in + Ekin_out;
         }

#        else // # ifdef DUAL_ENERGY

//       for the unsplitting method without the dual-energy formalism, we always correct the total energy density
//       instead of the kinematic energy density
//       --> internal energy may change
//       --> we must check the minimum internal energy after this update
         Etot_out = Etot_in + (real)0.5*( px_old*acc_old[0] + py_old*acc_old[1] + pz_old*acc_old[2] +
                                          px_new*acc_new[0] + py_new*acc_new[1] + pz_new*acc_new[2] );

//       check the minimum internal energy
//       --> restore the original internal energy if the updated value becomes smaller than the threshold
         if ( Etot_out - Ekin_out - Emag_in < MinEint )
            Etot_out = Enki_in + Ekin_out;

#        endif // #ifdef DUAL_ENERGY ... else ...

#        else // #ifdef UNSPLIT_GRAVITY
//       splitting update: force is evaluated only at the current step, and the
//       internal energy is kept exactly unchanged by construction

         rho_new = g_Flu_Array_New[P][DENS][idx_g0];
         px_new  = g_Flu_Array_New[P][MOMX][idx_g0];
         py_new  = g_Flu_Array_New[P][MOMY][idx_g0];
         pz_new  = g_Flu_Array_New[P][MOMZ][idx_g0];

//       backup the original internal energy so that we can restore it later
         _rho2   = (real)0.5/rho_new;
         Etot_in = g_Flu_Array_New[P][ENGY][idx_g0];
         Enki_in = Etot_in - _rho2*( SQR(px_new) + SQR(py_new) + SQR(pz_new) );

//       update the momentum density
         px_new += rho_new*acc_new[0];
         py_new += rho_new*acc_new[1];
         pz_new += rho_new*acc_new[2];

         g_Flu_Array_New[P][MOMX][idx_g0] = px_new;
         g_Flu_Array_New[P][MOMY][idx_g0] = py_new;
         g_Flu_Array_New[P][MOMZ][idx_g0] = pz_new;

//       for the splitting method, we ensure that the internal energy is unchanged
         Ekin_out = _rho2*( SQR(px_new) + SQR(py_new) + SQR(pz_new) );
         Etot_out = Enki_in + Ekin_out;

#        endif // #ifdef UNSPLIT_GRAVITY ... else ...


//       store the updated total energy density to the output array
         g_Flu_Array_New[P][ENGY][idx_g0] = Etot_out;

      } // CGPU_LOOP( idx_g0, CUBE(PS1) )
   } // for (int P=0; P<NPatchGroup*8; P++)

} // FUNCTION : CPU/CUPOT_HydroGravitySolver



#endif // #if ( MODEL == HYDRO && defined GRAVITY )
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "chrome/browser/content_settings/content_settings_policy_provider.h"

#include <string>
#include <vector>

#include "base/json/json_reader.h"
#include "base/values.h"
#include "chrome/browser/content_settings/content_settings_rule.h"
#include "chrome/browser/content_settings/content_settings_utils.h"
#include "chrome/browser/prefs/pref_service.h"
#include "chrome/common/chrome_notification_types.h"
#include "chrome/common/content_settings_pattern.h"
#include "chrome/common/pref_names.h"
#include "content/public/browser/browser_thread.h"
#include "content/public/browser/notification_details.h"
#include "content/public/browser/notification_source.h"

using content::BrowserThread;

namespace {

// The preferences used to manage ContentSettingsTypes.
// Indexed by ContentSettingsType; a NULL entry means there is no policy
// controlling the default value for that content type.
const char* kPrefToManageType[CONTENT_SETTINGS_NUM_TYPES] = {
  prefs::kManagedDefaultCookiesSetting,
  prefs::kManagedDefaultImagesSetting,
  prefs::kManagedDefaultJavaScriptSetting,
  prefs::kManagedDefaultPluginsSetting,
  prefs::kManagedDefaultPopupsSetting,
  prefs::kManagedDefaultGeolocationSetting,
  prefs::kManagedDefaultNotificationsSetting,
  NULL,  // No policy for default value of content type intents
  NULL,  // No policy for default value of content type auto-select-certificate
  NULL,  // No policy for default value of fullscreen requests
};

// Maps a managed per-URL pref (a list of pattern strings) to the content
// type and setting it installs.
struct PrefsForManagedContentSettingsMapEntry {
  const char* pref_name;
  ContentSettingsType content_type;
  ContentSetting setting;
};

const PrefsForManagedContentSettingsMapEntry
    kPrefsForManagedContentSettingsMap[] = {
  {
    prefs::kManagedCookiesAllowedForUrls,
    CONTENT_SETTINGS_TYPE_COOKIES,
    CONTENT_SETTING_ALLOW
  }, {
    prefs::kManagedCookiesSessionOnlyForUrls,
    CONTENT_SETTINGS_TYPE_COOKIES,
    CONTENT_SETTING_SESSION_ONLY
  }, {
    prefs::kManagedCookiesBlockedForUrls,
    CONTENT_SETTINGS_TYPE_COOKIES,
    CONTENT_SETTING_BLOCK
  }, {
    prefs::kManagedImagesAllowedForUrls,
    CONTENT_SETTINGS_TYPE_IMAGES,
    CONTENT_SETTING_ALLOW
  }, {
    prefs::kManagedImagesBlockedForUrls,
    CONTENT_SETTINGS_TYPE_IMAGES,
    CONTENT_SETTING_BLOCK
  }, {
    prefs::kManagedJavaScriptAllowedForUrls,
    CONTENT_SETTINGS_TYPE_JAVASCRIPT,
    CONTENT_SETTING_ALLOW
  }, {
    prefs::kManagedJavaScriptBlockedForUrls,
    CONTENT_SETTINGS_TYPE_JAVASCRIPT,
    CONTENT_SETTING_BLOCK
  }, {
    prefs::kManagedPluginsAllowedForUrls,
    CONTENT_SETTINGS_TYPE_PLUGINS,
    CONTENT_SETTING_ALLOW
  }, {
    prefs::kManagedPluginsBlockedForUrls,
    CONTENT_SETTINGS_TYPE_PLUGINS,
    CONTENT_SETTING_BLOCK
  }, {
    prefs::kManagedPopupsAllowedForUrls,
    CONTENT_SETTINGS_TYPE_POPUPS,
    CONTENT_SETTING_ALLOW
  }, {
    prefs::kManagedPopupsBlockedForUrls,
    CONTENT_SETTINGS_TYPE_POPUPS,
    CONTENT_SETTING_BLOCK
  }, {
    prefs::kManagedNotificationsAllowedForUrls,
    CONTENT_SETTINGS_TYPE_NOTIFICATIONS,
    CONTENT_SETTING_ALLOW
  }, {
    prefs::kManagedNotificationsBlockedForUrls,
    CONTENT_SETTINGS_TYPE_NOTIFICATIONS,
    CONTENT_SETTING_BLOCK
  }
};

}  // namespace

namespace content_settings {

// static
// Registers every policy-managed content-settings preference as unsyncable.
void PolicyProvider::RegisterUserPrefs(PrefService* prefs) {
  prefs->RegisterListPref(prefs::kManagedAutoSelectCertificateForUrls,
                          PrefService::UNSYNCABLE_PREF);
  prefs->RegisterListPref(prefs::kManagedCookiesAllowedForUrls,
                          PrefService::UNSYNCABLE_PREF);
  prefs->RegisterListPref(prefs::kManagedCookiesBlockedForUrls,
                          PrefService::UNSYNCABLE_PREF);
  prefs->RegisterListPref(prefs::kManagedCookiesSessionOnlyForUrls,
                          PrefService::UNSYNCABLE_PREF);
  prefs->RegisterListPref(prefs::kManagedImagesAllowedForUrls,
                          PrefService::UNSYNCABLE_PREF);
  prefs->RegisterListPref(prefs::kManagedImagesBlockedForUrls,
                          PrefService::UNSYNCABLE_PREF);
  prefs->RegisterListPref(prefs::kManagedJavaScriptAllowedForUrls,
                          PrefService::UNSYNCABLE_PREF);
  prefs->RegisterListPref(prefs::kManagedJavaScriptBlockedForUrls,
                          PrefService::UNSYNCABLE_PREF);
  prefs->RegisterListPref(prefs::kManagedPluginsAllowedForUrls,
                          PrefService::UNSYNCABLE_PREF);
  prefs->RegisterListPref(prefs::kManagedPluginsBlockedForUrls,
                          PrefService::UNSYNCABLE_PREF);
  prefs->RegisterListPref(prefs::kManagedPopupsAllowedForUrls,
                          PrefService::UNSYNCABLE_PREF);
  prefs->RegisterListPref(prefs::kManagedPopupsBlockedForUrls,
                          PrefService::UNSYNCABLE_PREF);
  prefs->RegisterListPref(prefs::kManagedNotificationsAllowedForUrls,
                          PrefService::UNSYNCABLE_PREF);
  prefs->RegisterListPref(prefs::kManagedNotificationsBlockedForUrls,
                          PrefService::UNSYNCABLE_PREF);

  // Preferences for default content setting policies. If a policy is not set,
  // the corresponding preference below stays at CONTENT_SETTING_DEFAULT.
  prefs->RegisterIntegerPref(prefs::kManagedDefaultCookiesSetting,
                             CONTENT_SETTING_DEFAULT,
                             PrefService::UNSYNCABLE_PREF);
  prefs->RegisterIntegerPref(prefs::kManagedDefaultImagesSetting,
                             CONTENT_SETTING_DEFAULT,
                             PrefService::UNSYNCABLE_PREF);
  prefs->RegisterIntegerPref(prefs::kManagedDefaultJavaScriptSetting,
                             CONTENT_SETTING_DEFAULT,
                             PrefService::UNSYNCABLE_PREF);
  prefs->RegisterIntegerPref(prefs::kManagedDefaultPluginsSetting,
                             CONTENT_SETTING_DEFAULT,
                             PrefService::UNSYNCABLE_PREF);
  prefs->RegisterIntegerPref(prefs::kManagedDefaultPopupsSetting,
                             CONTENT_SETTING_DEFAULT,
                             PrefService::UNSYNCABLE_PREF);
  prefs->RegisterIntegerPref(prefs::kManagedDefaultGeolocationSetting,
                             CONTENT_SETTING_DEFAULT,
                             PrefService::UNSYNCABLE_PREF);
  prefs->RegisterIntegerPref(prefs::kManagedDefaultNotificationsSetting,
                             CONTENT_SETTING_DEFAULT,
                             PrefService::UNSYNCABLE_PREF);
}

// Loads the managed defaults and per-URL settings, then subscribes to every
// managed pref so policy changes are picked up at runtime.
PolicyProvider::PolicyProvider(PrefService* prefs) : prefs_(prefs) {
  DCHECK_EQ(arraysize(kPrefToManageType),
            static_cast<size_t>(CONTENT_SETTINGS_NUM_TYPES));
  ReadManagedDefaultSettings();
  ReadManagedContentSettings(false);

  pref_change_registrar_.Init(prefs_);
  pref_change_registrar_.Add(prefs::kManagedAutoSelectCertificateForUrls, this);
  pref_change_registrar_.Add(prefs::kManagedCookiesBlockedForUrls, this);
  pref_change_registrar_.Add(prefs::kManagedCookiesAllowedForUrls, this);
  pref_change_registrar_.Add(prefs::kManagedCookiesSessionOnlyForUrls, this);
  pref_change_registrar_.Add(prefs::kManagedImagesBlockedForUrls, this);
  pref_change_registrar_.Add(prefs::kManagedImagesAllowedForUrls, this);
  pref_change_registrar_.Add(prefs::kManagedJavaScriptBlockedForUrls, this);
  pref_change_registrar_.Add(prefs::kManagedJavaScriptAllowedForUrls, this);
  pref_change_registrar_.Add(prefs::kManagedPluginsBlockedForUrls, this);
  pref_change_registrar_.Add(prefs::kManagedPluginsAllowedForUrls, this);
  pref_change_registrar_.Add(prefs::kManagedPopupsBlockedForUrls, this);
  pref_change_registrar_.Add(prefs::kManagedPopupsAllowedForUrls, this);
  pref_change_registrar_.Add(prefs::kManagedNotificationsAllowedForUrls, this);
  pref_change_registrar_.Add(prefs::kManagedNotificationsBlockedForUrls, this);
  // The following preferences are only used to indicate if a
  // default content setting is managed and to hold the managed default setting
  // value. If the value for any of the following preferences is set then the
  // corresponding default content setting is managed. These preferences exist
  // in parallel to the preference default content settings. If a
  // default content settings type is managed any user defined exceptions
  // (patterns) for this type are ignored.
  pref_change_registrar_.Add(prefs::kManagedDefaultCookiesSetting, this);
  pref_change_registrar_.Add(prefs::kManagedDefaultImagesSetting, this);
  pref_change_registrar_.Add(prefs::kManagedDefaultJavaScriptSetting, this);
  pref_change_registrar_.Add(prefs::kManagedDefaultPluginsSetting, this);
  pref_change_registrar_.Add(prefs::kManagedDefaultPopupsSetting, this);
  pref_change_registrar_.Add(prefs::kManagedDefaultGeolocationSetting, this);
  pref_change_registrar_.Add(prefs::kManagedDefaultNotificationsSetting, this);
}

PolicyProvider::~PolicyProvider() {
  // ShutdownOnUIThread() must have run and cleared prefs_.
  DCHECK(!prefs_);
}

// Returns an iterator over the policy-installed rules for |content_type|.
RuleIterator* PolicyProvider::GetRuleIterator(
    ContentSettingsType content_type,
    const ResourceIdentifier& resource_identifier,
    bool incognito) const {
  return value_map_.GetRuleIterator(content_type, resource_identifier, &lock_);
}

// Parses each managed pattern-list pref into |value_map| entries.
// Invalid patterns are logged and skipped rather than aborting the load.
void PolicyProvider::GetContentSettingsFromPreferences(
    OriginIdentifierValueMap* value_map) {
  for (size_t i = 0; i < arraysize(kPrefsForManagedContentSettingsMap); ++i) {
    const char* pref_name = kPrefsForManagedContentSettingsMap[i].pref_name;
    // Skip unset policies.
    if (!prefs_->HasPrefPath(pref_name)) {
      VLOG(2) << "Skipping unset preference: " << pref_name;
      continue;
    }

    const PrefService::Preference* pref = prefs_->FindPreference(pref_name);
    DCHECK(pref);
    DCHECK(pref->IsManaged());

    const base::ListValue* pattern_str_list = NULL;
    if (!pref->GetValue()->GetAsList(&pattern_str_list)) {
      NOTREACHED();
      return;
    }

    for (size_t j = 0; j < pattern_str_list->GetSize(); ++j) {
      std::string original_pattern_str;
      if (!pattern_str_list->GetString(j, &original_pattern_str)) {
        NOTREACHED();
        continue;
      }

      PatternPair pattern_pair = ParsePatternString(original_pattern_str);
      // Ignore invalid patterns.
      if (!pattern_pair.first.IsValid()) {
        VLOG(1) << "Ignoring invalid content settings pattern: " <<
                   original_pattern_str;
        continue;
      }

      ContentSettingsType content_type =
          kPrefsForManagedContentSettingsMap[i].content_type;
      // Auto-select-certificate entries are handled by
      // GetAutoSelectCertificateSettingsFromPreferences(), never here.
      DCHECK_NE(content_type, CONTENT_SETTINGS_TYPE_AUTO_SELECT_CERTIFICATE);
      // If only one pattern was defined auto expand it to a pattern pair.
      ContentSettingsPattern secondary_pattern =
          !pattern_pair.second.IsValid() ? ContentSettingsPattern::Wildcard()
                                         : pattern_pair.second;
      value_map->SetValue(
          pattern_pair.first,
          secondary_pattern,
          content_type,
          ResourceIdentifier(NO_RESOURCE_IDENTIFIER),
          base::Value::CreateIntegerValue(
              kPrefsForManagedContentSettingsMap[i].setting));
    }
  }
}

// Parses the auto-select-certificate policy: a list of JSON strings, each a
// {"pattern": ..., "filter": ...} pair. Malformed entries are logged and
// skipped.
void PolicyProvider::GetAutoSelectCertificateSettingsFromPreferences(
    OriginIdentifierValueMap* value_map) {
  const char* pref_name = prefs::kManagedAutoSelectCertificateForUrls;

  if (!prefs_->HasPrefPath(pref_name)) {
    VLOG(2) << "Skipping unset preference: " << pref_name;
    return;
  }

  const PrefService::Preference* pref = prefs_->FindPreference(pref_name);
  DCHECK(pref);
  DCHECK(pref->IsManaged());

  const base::ListValue* pattern_filter_str_list = NULL;
  if (!pref->GetValue()->GetAsList(&pattern_filter_str_list)) {
    NOTREACHED();
    return;
  }

  // Parse the list of pattern filter strings. A pattern filter string has
  // the following JSON format:
  //
  // {
  //   "pattern": <content settings pattern string>,
  //   "filter" : <certificate filter in JSON format>
  // }
  //
  // e.g.
  // {
  //   "pattern": "[*.]example.com",
  //   "filter": {
  //     "ISSUER": {
  //       "CN": "some name"
  //     }
  //   }
  // }
  for (size_t j = 0; j < pattern_filter_str_list->GetSize(); ++j) {
    std::string pattern_filter_json;
    if (!pattern_filter_str_list->GetString(j, &pattern_filter_json)) {
      NOTREACHED();
      continue;
    }

    scoped_ptr<base::Value> value(base::JSONReader::Read(pattern_filter_json,
        base::JSON_ALLOW_TRAILING_COMMAS));
    if (!value.get()) {
      VLOG(1) << "Ignoring invalid certificate auto select setting. Reason:"
                 " Invalid JSON format: " << pattern_filter_json;
      continue;
    }

    scoped_ptr<base::DictionaryValue> pattern_filter_pair(
        static_cast<base::DictionaryValue*>(value.release()));
    std::string pattern_str;
    bool pattern_read = pattern_filter_pair->GetString("pattern", &pattern_str);
    base::Value* cert_filter_ptr = NULL;
    bool filter_read = pattern_filter_pair->Remove("filter", &cert_filter_ptr);
    scoped_ptr<base::Value> cert_filter(cert_filter_ptr);
    if (!pattern_read || !filter_read) {
      VLOG(1) << "Ignoring invalid certificate auto select setting. Reason:"
                 " Missing pattern or filter.";
      continue;
    }

    ContentSettingsPattern pattern =
        ContentSettingsPattern::FromString(pattern_str);
    // Ignore invalid patterns.
    if (!pattern.IsValid()) {
      VLOG(1) << "Ignoring invalid certificate auto select setting:"
                 " Invalid content settings pattern: " << pattern;
      continue;
    }

    DCHECK(cert_filter->IsType(base::Value::TYPE_DICTIONARY));
    value_map->SetValue(pattern,
                        ContentSettingsPattern::Wildcard(),
                        CONTENT_SETTINGS_TYPE_AUTO_SELECT_CERTIFICATE,
                        std::string(),
                        cert_filter.release());
  }
}

// Refreshes the managed default for every content type that has a policy.
void PolicyProvider::ReadManagedDefaultSettings() {
  for (size_t type = 0; type < arraysize(kPrefToManageType); ++type) {
    if (kPrefToManageType[type] == NULL) {
      continue;
    }
    UpdateManagedDefaultSetting(ContentSettingsType(type));
  }
}

// Re-reads one managed default-setting pref and installs (or removes) the
// wildcard rule for |content_type| accordingly.
void PolicyProvider::UpdateManagedDefaultSetting(
    ContentSettingsType content_type) {
  // If a pref to manage a default-content-setting was not set (NOTICE:
  // "HasPrefPath" returns false if no value was set for a registered pref) then
  // the default value of the preference is used. The default value of a
  // preference to manage a default-content-settings is CONTENT_SETTING_DEFAULT.
  // This indicates that no managed value is set. If a pref was set, then it
  // MUST be managed.
  DCHECK(!prefs_->HasPrefPath(kPrefToManageType[content_type]) ||
          prefs_->IsManagedPreference(kPrefToManageType[content_type]));
  base::AutoLock auto_lock(lock_);
  int setting = prefs_->GetInteger(kPrefToManageType[content_type]);
  if (setting == CONTENT_SETTING_DEFAULT) {
    value_map_.DeleteValue(
        ContentSettingsPattern::Wildcard(),
        ContentSettingsPattern::Wildcard(),
        content_type,
        std::string());
  } else {
    value_map_.SetValue(
        ContentSettingsPattern::Wildcard(),
        ContentSettingsPattern::Wildcard(),
        content_type,
        std::string(),
        Value::CreateIntegerValue(setting));
  }
}

// Rebuilds the per-URL rules from prefs; |overwrite| clears existing rules
// first (used when reacting to a pref change).
void PolicyProvider::ReadManagedContentSettings(bool overwrite) {
  base::AutoLock auto_lock(lock_);
  if (overwrite)
    value_map_.clear();
  GetContentSettingsFromPreferences(&value_map_);
  GetAutoSelectCertificateSettingsFromPreferences(&value_map_);
}

// Since the PolicyProvider is a read only content settings provider, all
// methods of the ProviderInterface that set or delete any settings do nothing.
bool PolicyProvider::SetWebsiteSetting(
    const ContentSettingsPattern& primary_pattern,
    const ContentSettingsPattern& secondary_pattern,
    ContentSettingsType content_type,
    const ResourceIdentifier& resource_identifier,
    Value* value) {
  return false;
}

void PolicyProvider::ClearAllContentSettingsRules(
    ContentSettingsType content_type) {
}

// Detaches from the PrefService; must run on the UI thread before the
// destructor.
void PolicyProvider::ShutdownOnUIThread() {
  DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI));
  RemoveAllObservers();
  if (!prefs_)
    return;
  pref_change_registrar_.RemoveAll();
  prefs_ = NULL;
}

// Reacts to a managed-pref change: default-setting prefs update only that
// type; any pattern-list pref triggers a full reload. Observers are notified
// with CONTENT_SETTINGS_TYPE_DEFAULT, i.e. "anything may have changed".
void PolicyProvider::Observe(int type,
                             const content::NotificationSource& source,
                             const content::NotificationDetails& details) {
  DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI));

  if (type == chrome::NOTIFICATION_PREF_CHANGED) {
    DCHECK_EQ(prefs_, content::Source<PrefService>(source).ptr());
    std::string* name = content::Details<std::string>(details).ptr();
    if (*name == prefs::kManagedDefaultCookiesSetting) {
      UpdateManagedDefaultSetting(CONTENT_SETTINGS_TYPE_COOKIES);
    } else if (*name == prefs::kManagedDefaultImagesSetting) {
      UpdateManagedDefaultSetting(CONTENT_SETTINGS_TYPE_IMAGES);
    } else if (*name == prefs::kManagedDefaultJavaScriptSetting) {
      UpdateManagedDefaultSetting(CONTENT_SETTINGS_TYPE_JAVASCRIPT);
    } else if (*name == prefs::kManagedDefaultPluginsSetting) {
      UpdateManagedDefaultSetting(CONTENT_SETTINGS_TYPE_PLUGINS);
    } else if (*name == prefs::kManagedDefaultPopupsSetting) {
      UpdateManagedDefaultSetting(CONTENT_SETTINGS_TYPE_POPUPS);
    } else if (*name == prefs::kManagedDefaultGeolocationSetting) {
      UpdateManagedDefaultSetting(CONTENT_SETTINGS_TYPE_GEOLOCATION);
    } else if (*name == prefs::kManagedDefaultNotificationsSetting) {
      UpdateManagedDefaultSetting(CONTENT_SETTINGS_TYPE_NOTIFICATIONS);
    } else if (*name == prefs::kManagedAutoSelectCertificateForUrls ||
               *name == prefs::kManagedCookiesAllowedForUrls ||
               *name == prefs::kManagedCookiesBlockedForUrls ||
               *name == prefs::kManagedCookiesSessionOnlyForUrls ||
               *name == prefs::kManagedImagesAllowedForUrls ||
               *name == prefs::kManagedImagesBlockedForUrls ||
               *name == prefs::kManagedJavaScriptAllowedForUrls ||
               *name == prefs::kManagedJavaScriptBlockedForUrls ||
               *name == prefs::kManagedPluginsAllowedForUrls ||
               *name == prefs::kManagedPluginsBlockedForUrls ||
               *name == prefs::kManagedPopupsAllowedForUrls ||
               *name == prefs::kManagedPopupsBlockedForUrls ||
               *name == prefs::kManagedNotificationsAllowedForUrls ||
               *name == prefs::kManagedNotificationsBlockedForUrls) {
      ReadManagedContentSettings(true);
      ReadManagedDefaultSettings();
    }
  } else {
    NOTREACHED() << "Unexpected notification";
    return;
  }
  NotifyObservers(ContentSettingsPattern(),
                  ContentSettingsPattern(),
                  CONTENT_SETTINGS_TYPE_DEFAULT,
                  std::string());
}

}  // namespace content_settings
#include <iostream>
#include <limits>

// Reads a positive integer from stdin (re-prompting on invalid input) and
// prints the sum 1 + 2 + ... + max_number.
//
// Fixes over the previous version:
//  - Non-numeric input used to leave std::cin in a fail state, so the
//    validation loop spun forever; the stream is now cleared and the bad
//    line discarded before re-prompting.
//  - The accumulator was an int, which overflows for max_number >= 65536;
//    it is now a 64-bit long long.
//  - system("pause") was non-portable (Windows shell only), a needless
//    shell invocation, and used without including <cstdlib>; replaced by a
//    portable "press Enter" pause.
int main() {
  long long total = 0;  // 64-bit: n*(n+1)/2 exceeds INT_MAX already for n ~ 65536
  int max_number = 0;

  // Prompt until a strictly positive integer is successfully extracted.
  for (;;) {
    std::cout << "Enter a number greater than 0: " << std::flush;
    if (std::cin >> max_number && max_number > 0)
      break;
    if (std::cin.eof()) {
      // Input exhausted (e.g. piped input) -- nothing more to read.
      std::cerr << "No valid input; aborting.\n";
      return 1;
    }
    std::cin.clear();  // clear failbit set by a non-numeric token
    std::cin.ignore(std::numeric_limits<std::streamsize>::max(), '\n');
  }

  for (int n = 1; n <= max_number; n++) {
    total += n;
  }

  std::cout << "Total: " << total << std::endl;

  // Portable replacement for system("pause"): wait for Enter.
  std::cout << "Press Enter to continue . . . " << std::flush;
  std::cin.ignore(std::numeric_limits<std::streamsize>::max(), '\n');
  std::cin.get();
  return 0;
}
//
// libtgvoip is free and unencumbered public domain software.
// For more information, see http://unlicense.org or the UNLICENSE file
// you should have received with this source code distribution.
//

#include "json11.hpp"
#include "logging.h"
#include "threading.h"
#include "Buffers.h"
#include "OpusDecoder.h"
#include "OpusEncoder.h"
#include "PacketSender.h"
#include "PrivateDefines.h"
#include "VoIPController.h"
#include "VoIPServerConfig.h"
#include "video/VideoPacketSender.h"

#ifndef _WIN32
#include <sys/time.h>
#include <unistd.h>
#endif

#if TGVOIP_INCLUDE_OPUS_PACKAGE
#include <opus/opus.h>
#else
#include <opus.h>
#endif

#include <algorithm>
#include <cassert>
#include <cerrno>
#include <cinttypes>
#include <cmath>
#include <cstring>
#include <ctime>
#include <cwchar>
#include <limits>
#include <sstream>
#include <stdexcept>

// Rounds x up to a multiple of 4 and returns the padding amount (0..3).
// PAD4() yields 1..4; a result of 4 means x was already aligned.
inline int pad4(int x)
{
    int r = PAD4(x);
    if (r == 4)
        return 0;
    return r;
}

using namespace tgvoip;

#ifdef __APPLE__
#include "os/darwin/AudioUnitIO.h"
#include <mach/mach_time.h>
double VoIPController::machTimebase = 0;
std::uint64_t VoIPController::machTimestart = 0;
#endif

#ifdef _WIN32
std::int64_t VoIPController::win32TimeScale = 0;
bool VoIPController::didInitWin32TimeScale = false;
#endif

#ifdef __ANDROID__
#include "NetworkSocket.h"
#include "os/android/AudioInputAndroid.h"
#include "os/android/JNIUtilities.h"
extern jclass jniUtilitiesClass;
#endif

#if defined(TGVOIP_USE_CALLBACK_AUDIO_IO)
#include "audio/AudioIOCallback.h"
#endif

// Assert that the calling code runs on the controller's message thread.
#define ENFORCE_MSG_THREAD assert(m_messageThread.IsCurrent())

extern FILE* tgvoipLogFile;

#pragma mark - Public API

// Constructor: loads all tunables from the shared ServerConfig (falling back
// to the documented defaults) and creates the single outgoing Opus audio
// stream (id 1, 60 ms frames).
VoIPController::VoIPController()
    : m_congestionControl(new CongestionControl())
    , m_udpSocket(NetworkSocket::Create(NetworkProtocol::UDP))
    , m_realUdpSocket(m_udpSocket)
    , m_selectCanceller(SocketSelectCanceller::Create())
    , m_rawSendQueue(64)
{
    ServerConfig* serverConfigInstance = ServerConfig::GetSharedInstance();
    m_maxAudioBitrate = static_cast<std::uint32_t>(serverConfigInstance->GetInt("audio_max_bitrate", 20000));
    m_maxAudioBitrateGPRS = static_cast<std::uint32_t>(serverConfigInstance->GetInt("audio_max_bitrate_gprs", 8000));
    m_maxAudioBitrateEDGE = static_cast<std::uint32_t>(serverConfigInstance->GetInt("audio_max_bitrate_edge", 16000));
    m_maxAudioBitrateSaving = static_cast<std::uint32_t>(serverConfigInstance->GetInt("audio_max_bitrate_saving", 8000));
    m_initAudioBitrate = static_cast<std::uint32_t>(serverConfigInstance->GetInt("audio_init_bitrate", 16000));
    m_initAudioBitrateGPRS = static_cast<std::uint32_t>(serverConfigInstance->GetInt("audio_init_bitrate_gprs", 8000));
    m_initAudioBitrateEDGE = static_cast<std::uint32_t>(serverConfigInstance->GetInt("audio_init_bitrate_edge", 8000));
    m_initAudioBitrateSaving = static_cast<std::uint32_t>(serverConfigInstance->GetInt("audio_init_bitrate_saving", 8000));
    m_audioBitrateStepIncr = static_cast<std::uint32_t>(serverConfigInstance->GetInt("audio_bitrate_step_incr", 1000));
    m_audioBitrateStepDecr = static_cast<std::uint32_t>(serverConfigInstance->GetInt("audio_bitrate_step_decr", 1000));
    m_minAudioBitrate = static_cast<std::uint32_t>(serverConfigInstance->GetInt("audio_min_bitrate", 8000));
    m_needRateFlags = static_cast<std::uint32_t>(serverConfigInstance->GetInt("rate_flags", std::int32_t{~0}));
    m_maxUnsentStreamPackets = static_cast<std::uint32_t>(serverConfigInstance->GetInt("max_unsent_stream_packets", 2));
    m_unackNopThreshold = static_cast<std::uint32_t>(serverConfigInstance->GetInt("unack_nop_threshold", 10));
    m_relaySwitchThreshold = serverConfigInstance->GetDouble("relay_switch_threshold", 0.8);
    m_p2pToRelaySwitchThreshold = serverConfigInstance->GetDouble("p2p_to_relay_switch_threshold", 0.6);
    m_relayToP2pSwitchThreshold = serverConfigInstance->GetDouble("relay_to_p2p_switch_threshold", 0.8);
    m_reconnectingTimeout = serverConfigInstance->GetDouble("reconnecting_state_timeout", 2.0);
    m_rateMaxAcceptableRTT = serverConfigInstance->GetDouble("rate_min_rtt", 0.6);
    m_rateMaxAcceptableSendLoss = serverConfigInstance->GetDouble("rate_min_send_loss", 0.2);
    m_packetLossToEnableExtraEC = serverConfigInstance->GetDouble("packet_loss_for_extra_ec", 0.02);
#ifdef __APPLE__
    machTimestart = 0;
#endif

    // Default outgoing stream: Opus audio, 60 ms frame duration.
    std::shared_ptr<Stream> stream = std::make_shared<Stream>();
    stream->id = 1;
    stream->type = StreamType::AUDIO;
    stream->codec = CODEC_OPUS;
    stream->enabled = true;
    stream->frameDuration = 60;
    m_outgoingStreams.emplace_back(std::move(stream));
}

// Destructor. Stop() MUST have been called first; otherwise the process is
// aborted on purpose, since threads may still reference this object.
VoIPController::~VoIPController()
{
    LOGD("Entered VoIPController::~VoIPController");
    if (!m_stopping)
    {
        LOGE("!!!!!!!!!!!!!!!!!!!! CALL controller->Stop() BEFORE DELETING THE CONTROLLER OBJECT !!!!!!!!!!!!!!!!!!!!!!!1");
        std::abort();
    }
    LOGD("before close socket");
    delete m_udpSocket;
    // m_realUdpSocket may alias m_udpSocket (see constructor); avoid a double delete.
    if (m_realUdpSocket != m_udpSocket)
        delete m_realUdpSocket;
    LOGD("before delete audioIO");
    delete m_audioIO;
    m_audioInput = nullptr;
    m_audioOutput = nullptr;
    for (std::shared_ptr<Stream>& stream : m_incomingStreams)
    {
        LOGD("before stop decoder");
        if (stream->decoder != nullptr)
            stream->decoder->Stop();
    }
    LOGD("before delete encoder");
    if (m_encoder != nullptr)
    {
        m_encoder->Stop();
        delete m_encoder;
    }
    LOGD("before delete echo canceller");
    if (m_echoCanceller != nullptr)
    {
        m_echoCanceller->Stop();
        delete m_echoCanceller;
    }
    delete m_congestionControl;
    if (m_statsDump != nullptr)
        fclose(m_statsDump);
    delete m_selectCanceller;
    LOGD("Left VoIPController::~VoIPController");
    // Close the global log file last so the LOGD calls above still reach it.
    if (tgvoipLogFile != nullptr)
    {
        FILE* log = tgvoipLogFile;
        tgvoipLogFile = nullptr;
        fclose(log);
    }
#if defined(TGVOIP_USE_CALLBACK_AUDIO_IO)
    if (m_preprocDecoder)
    {
        opus_decoder_destroy(m_preprocDecoder);
        m_preprocDecoder = nullptr;
    }
#endif
}

// Plain aggregate-style constructor for the public configuration struct.
VoIPController::Config::Config(double initTimeout, double recvTimeout, DataSaving dataSaving,
    bool enableAEC, bool enableNS, bool enableAGC, bool enableCallUpgrade)
    : initTimeout(initTimeout)
    , recvTimeout(recvTimeout)
    , dataSaving(dataSaving)
    , enableAEC(enableAEC)
    , enableNS(enableNS)
    , enableAGC(enableAGC)
    , enableCallUpgrade(enableCallUpgrade)
{
}

// Move-only value type describing a packet queued for sending.
VoIPController::PendingOutgoingPacket::PendingOutgoingPacket(std::uint32_t seq, PktType type, std::size_t len, Buffer&& data, std::int64_t endpoint)
    : seq(seq)
    , type(type)
    , len(len)
    , data(std::move(data))
    , endpoint(endpoint)
{
}

VoIPController::PendingOutgoingPacket::PendingOutgoingPacket(PendingOutgoingPacket&& other) noexcept
    : seq(other.seq)
    , type(other.type)
    , len(other.len)
    , data(std::move(other.data))
    , endpoint(other.endpoint)
{
}

VoIPController::PendingOutgoingPacket& VoIPController::PendingOutgoingPacket::operator=(PendingOutgoingPacket&& other) noexcept
{
    if (this != &other)
    {
        seq = other.seq;
        type = other.type;
        len = other.len;
        data = std::move(other.data);
        endpoint = other.endpoint;
    }
    return *this;
}

// Shuts the controller down: closes sockets, unblocks and joins the send and
// receive threads, stops the message thread and audio I/O, and destroys the
// video packet sender. Must be called before the destructor runs.
void VoIPController::Stop()
{
    LOGD("Entered VoIPController::Stop");
    m_stopping = true;
    m_runReceiver = false;
    LOGD("before shutdown socket");
    if (m_udpSocket)
        m_udpSocket->Close();
    if (m_realUdpSocket != m_udpSocket)
        m_realUdpSocket->Close();
    m_selectCanceller->CancelSelect();
    // An empty packet is pushed so a send thread blocked on the queue wakes up.
    m_rawSendQueue.Put(RawPendingOutgoingPacket {
        .packet = NetworkPacket::Empty(),
        .socket = nullptr });
    LOGD("before join sendThread");
    if (m_sendThread)
    {
        m_sendThread->Join();
        delete m_sendThread;
    }
    LOGD("before join recvThread");
    if (m_recvThread)
    {
        m_recvThread->Join();
        delete m_recvThread;
    }
    LOGD("before stop messageThread");
    m_messageThread.Stop();
    {
        LOGD("Before stop audio I/O");
        MutexGuard m(m_audioIOMutex);
        if (m_audioInput)
        {
            m_audioInput->Stop();
            m_audioInput->SetCallback(nullptr, nullptr);
        }
        if (m_audioOutput)
        {
            m_audioOutput->Stop();
            m_audioOutput->SetCallback(nullptr, nullptr);
        }
    }
    if (m_videoPacketSender)
    {
        LOGD("before delete video packet sender");
        delete m_videoPacketSender;
        m_videoPacketSender = nullptr;
    }
    LOGD("Left VoIPController::Stop [need rate = %d]", m_needRate);
}

// Whether the client should ask the user to rate this call; gated by the
// server-side "bad_call_rating" switch.
bool VoIPController::NeedRate()
{
    return m_needRate && ServerConfig::GetSharedInstance()->GetBoolean("bad_call_rating", false);
}

std::int32_t VoIPController::GetConnectionMaxLayer()
{
    return 92;
}

// Installs the relay/P2P endpoint list. Must be called before Start()
// (asserted via !m_runReceiver). The first endpoint becomes the current and
// preferred relay; TCP is only used when no UDP relay is present.
void VoIPController::SetRemoteEndpoints(const std::vector<Endpoint>& endpoints, bool allowP2p, std::int32_t connectionMaxLayer)
{
    LOGW("Set remote endpoints, allowP2P=%d, connectionMaxLayer=%u", allowP2p ? 1 : 0, connectionMaxLayer);
    assert(!m_runReceiver);
    m_preferredRelay = 0;
    m_endpoints.clear();
    m_didAddTcpRelays = false;
    m_useTCP = true;
    for (const Endpoint& endpoint : endpoints)
    {
        if (m_endpoints.find(endpoint.id) != m_endpoints.end())
            LOGE("Endpoint IDs are not unique!");
        m_endpoints[endpoint.id] = endpoint;
        if (m_currentEndpoint == 0)
            m_currentEndpoint = endpoint.id;
        if (endpoint.type == Endpoint::Type::TCP_RELAY)
            m_didAddTcpRelays = true;
        if (endpoint.type == Endpoint::Type::UDP_RELAY)
            m_useTCP = false;
        LOGV("Adding endpoint: %s:%d, %s", endpoint.address.ToString().c_str(), endpoint.port, endpoint.type == Endpoint::Type::UDP_RELAY ? "UDP" : "TCP");
    }
    m_preferredRelay = m_currentEndpoint;
    this->m_allowP2p = allowP2p;
    this->m_connectionMaxLayer = connectionMaxLayer;
    if (connectionMaxLayer >= 74)
    {
        m_useMTProto2 = true;
    }
    AddIPv6Relays();
}

// Opens the UDP socket and starts the receive and message threads.
void VoIPController::Start()
{
    LOGW("Starting voip controller");
    m_udpSocket->Open();
    if (m_udpSocket->IsFailed())
    {
        SetState(State::FAILED);
        return;
    }
    m_runReceiver = true;
    m_recvThread = new Thread(std::bind(&VoIPController::RunRecvThread, this));
    m_recvThread->SetName("VoipRecv");
    m_recvThread->Start();
    m_messageThread.Start();
}

// Begins establishing the call by starting the send thread. Forces a sane
// init timeout if the caller forgot to set a config.
void VoIPController::Connect()
{
    assert(m_state != State::WAIT_INIT_ACK);
    m_connectionInitTime = GetCurrentTime();
    if (m_config.initTimeout == 0.0)
    {
        LOGE("Init timeout is 0 -- did you forget to set config?");
        m_config.initTimeout = 30.0;
    }
    m_sendThread = new Thread(std::bind(&VoIPController::RunSendThread, this));
    m_sendThread->SetName("VoipSend");
    m_sendThread->Start();
}

// Stores the 256-byte shared key and derives the key fingerprint (last 8
// bytes of SHA-1) and call ID (last 16 bytes of SHA-256) from it.
void VoIPController::SetEncryptionKey(char* key, bool isOutgoing)
{
    std::memcpy(m_encryptionKey, key, 256);
    std::uint8_t sha1[SHA1_LENGTH];
    crypto.sha1(reinterpret_cast<std::uint8_t*>(m_encryptionKey), 256, sha1);
    std::memcpy(m_keyFingerprint, sha1 + (SHA1_LENGTH - 8), 8);
    std::uint8_t sha256[SHA256_LENGTH];
    crypto.sha256(reinterpret_cast<std::uint8_t*>(m_encryptionKey), 256, sha256);
    std::memcpy(m_callID, sha256 + (SHA256_LENGTH - 16), 16);
    this->m_isOutgoing = isOutgoing;
}

// Records the new network type, re-derives bitrate/data-saving limits, and —
// when the active OS network interface actually changed mid-call — performs a
// network handover on the message thread (resetting endpoint stats, dropping
// LAN endpoints, re-probing UDP, and notifying the peer).
void VoIPController::SetNetworkType(NetType type)
{
    m_networkType = type;
    UpdateDataSavingState();
    UpdateAudioBitrateLimit();
    m_myIPv6 = NetworkAddress::Empty();
    std::string itfName = m_udpSocket->GetLocalInterfaceInfo(nullptr, &m_myIPv6);
    LOGI("set network type: %s, active interface %s", NetworkTypeToString(type).c_str(), itfName.c_str());
    LOGI("Local IPv6 address: %s", m_myIPv6.ToString().c_str());
    if (IS_MOBILE_NETWORK(m_networkType))
    {
        CellularCarrierInfo carrier = GetCarrierInfo();
        if (!carrier.name.empty())
        {
            LOGI("Carrier: %s [%s; mcc=%s, mnc=%s]", carrier.name.c_str(), carrier.countryCode.c_str(), carrier.mcc.c_str(), carrier.mnc.c_str());
        }
    }
    if (itfName != m_activeNetItfName)
    {
        m_udpSocket->OnActiveInterfaceChanged();
        LOGI("Active network interface changed: %s -> %s", m_activeNetItfName.c_str(), itfName.c_str());
        // The very first interface report before the call is established is
        // not a handover -- skip the reset logic below.
        bool isFirstChange = m_activeNetItfName.length() == 0 && m_state != State::ESTABLISHED && m_state != State::RECONNECTING;
        m_activeNetItfName = itfName;
        if (isFirstChange)
            return;
        m_messageThread.Post([this] {
            m_wasNetworkHandover = true;
            if (m_currentEndpoint != 0)
            {
                const Endpoint& _currentEndpoint = m_endpoints.at(m_currentEndpoint);
                const Endpoint& _preferredRelay = m_endpoints.at(m_preferredRelay);
                if (_currentEndpoint.type != Endpoint::Type::UDP_RELAY)
                {
                    if (_preferredRelay.type == Endpoint::Type::UDP_RELAY)
                        m_currentEndpoint = m_preferredRelay;
                    MutexGuard m(m_endpointsMutex);
                    // Drop the LAN endpoint -- it is meaningless on the new network.
                    constexpr std::int64_t lanID = static_cast<std::int64_t>(FOURCC('L', 'A', 'N', '4')) << 32;
                    m_endpoints.erase(lanID);
                    for (std::pair<const std::int64_t, Endpoint>& e : m_endpoints)
                    {
                        Endpoint& endpoint = e.second;
                        if (endpoint.type == Endpoint::Type::UDP_RELAY && m_useTCP)
                        {
                            m_useTCP = false;
                            if (_preferredRelay.type == Endpoint::Type::TCP_RELAY)
                            {
                                m_preferredRelay = m_currentEndpoint = endpoint.id;
                            }
                        }
                        else if (endpoint.type == Endpoint::Type::TCP_RELAY && endpoint.m_socket)
                        {
                            endpoint.m_socket->Close();
                        }
                        // RTT measured on the old network no longer applies.
                        endpoint.m_averageRTT = 0;
                        endpoint.m_rtts.Reset();
                    }
                }
            }
            m_lastUdpPingTime = 0;
            if (m_proxyProtocol == Proxy::SOCKS5)
                InitUDPProxy();
            if (m_allowP2p && m_currentEndpoint)
            {
                SendPublicEndpointsRequest();
            }
            // Tell the peer the network changed (legacy packet for old peers,
            // extra field for peerVersion >= 6).
            BufferOutputStream s(4);
            s.WriteInt32(m_dataSavingMode ? INIT_FLAG_DATA_SAVING_ENABLED : 0);
            if (m_peerVersion < 6)
            {
                SendPacketReliably(PktType::NETWORK_CHANGED, s.GetBuffer(), s.GetLength(), 1, 20);
            }
            else
            {
                Buffer buf(std::move(s));
                SendExtra(buf, ExtraType::NETWORK_CHANGED);
            }
            m_needReInitUdpProxy = true;
            m_selectCanceller->CancelSelect();
            m_didSendIPv6Endpoint = false;

            AddIPv6Relays();
            ResetUdpAvailability();
            ResetEndpointPingStats();
        });
    }
}

// Average RTT over recently acknowledged packets. Returns 999 (sentinel) when
// more than 32 packets are outstanding, i.e. the link is effectively dead.
// Must run on the message thread.
double VoIPController::GetAverageRTT()
{
    ENFORCE_MSG_THREAD;
    if (m_lastSentSeq >= m_lastRemoteAckSeq)
    {
        std::uint32_t diff = m_lastSentSeq - m_lastRemoteAckSeq;
        if (diff < 32)
        {
            double res = 0;
            int count = 0;
            for (const RecentOutgoingPacket& packet : m_recentOutgoingPackets)
            {
                if (packet.ackTime > 0)
                {
                    res += (packet.ackTime - packet.sendTime);
                    ++count;
                }
            }
            if (count > 0)
                res /= count;
            return res;
        }
    }
    return 999;
}

// Mutes/unmutes the microphone: stops or starts capture, toggles the echo
// canceller, and (once established) tells the peer the audio stream state.
void VoIPController::SetMicMute(bool mute)
{
    if (m_micMuted == mute)
        return;
    m_micMuted = mute;
    if (m_audioInput)
    {
        if (mute)
            m_audioInput->Stop();
        else
            m_audioInput->Start();
        if (!m_audioInput->IsInitialized())
        {
            m_lastError = Error::AUDIO_IO;
            SetState(State::FAILED);
            return;
        }
    }
    if (m_echoCanceller)
        m_echoCanceller->Enable(!mute);
    if (m_state == State::ESTABLISHED)
    {
        m_messageThread.Post([this] {
            for (std::shared_ptr<Stream>& s : m_outgoingStreams)
            {
                if (s->type != StreamType::AUDIO)
                    continue;
                s->enabled = !m_micMuted;
                if (m_peerVersion < 6)
                {
                    // Legacy peers: explicit STREAM_STATE packet.
                    std::uint8_t buf[2];
                    buf[0] = s->id;
                    buf[1] = (m_micMuted ? 0 : 1);
                    SendPacketReliably(PktType::STREAM_STATE, buf, 2, 0.5, 20);
                }
                else
                {
                    SendStreamFlags(*s);
                }
            }
        });
    }
}

// Builds a human-readable multi-line diagnostics dump: per-endpoint state,
// jitter buffer, congestion control, loss and bitrate counters, and the
// negotiated video codecs.
std::string VoIPController::GetDebugString()
{
    std::string r = "Remote endpoints: \n";
    char buffer[2048];
    MutexGuard m(m_endpointsMutex);
    for (auto& [_, endpoint] : m_endpoints)
    {
        std::string type;
        switch (endpoint.type)
        {
        case Endpoint::Type::UDP_P2P_INET:
            type = "UDP_P2P_INET";
            break;
        case Endpoint::Type::UDP_P2P_LAN:
            type = "UDP_P2P_LAN";
            break;
        case Endpoint::Type::UDP_RELAY:
            type = "UDP_RELAY";
            break;
        case Endpoint::Type::TCP_RELAY:
            type = "TCP_RELAY";
            break;
            //        default:
            //            type = "UNKNOWN";
            //            break;
        }
        std::snprintf(buffer, sizeof(buffer), "%s:%u %dms %d 0x%" PRIx64 " [%s%s]\n", endpoint.address.IsEmpty() ? ("[" + endpoint.v6address.ToString() + "]").c_str() : endpoint.address.ToString().c_str(), endpoint.port, static_cast<int>(endpoint.m_averageRTT * 1000), endpoint.m_udpPongCount, static_cast<std::uint64_t>(endpoint.id), type.c_str(), m_currentEndpoint == endpoint.id ? ", IN_USE" : "");
        r += buffer;
    }
    if (m_shittyInternetMode)
    {
        std::snprintf(buffer, sizeof(buffer), "ShittyInternetMode: level %d\n", m_extraEcLevel);
        r += buffer;
    }
    double avgLate[3];
    std::shared_ptr<Stream> stream = GetStreamByType(StreamType::AUDIO, false);
    std::shared_ptr<JitterBuffer> jitterBuffer;
    if (stream != nullptr)
        jitterBuffer = stream->jitterBuffer;
    if (jitterBuffer != nullptr)
        jitterBuffer->GetAverageLateCount(avgLate);
    else
        std::memset(avgLate, 0, 3 * sizeof(double));
    std::snprintf(
        buffer, sizeof(buffer),
        "Jitter buffer: %d/%.2f | %.1f, %.1f, %.1f\n"
        "RTT avg/min: %d/%d\n"
        "Congestion window: %d/%d bytes\n"
        "Key fingerprint: %02hhX%02hhX%02hhX%02hhX%02hhX%02hhX%02hhX%02hhX%s\n"
        "Last sent/ack'd seq: %u/%u\n"
        "Last recvd seq: %u\n"
        "Send/recv losses: %u/%u (%d%%)\n"
        "Audio bitrate: %d kbit\n"
        "Outgoing queue: %u\n"
        "Frame size out/in: %d/%d\n"
        "Bytes sent/recvd: %llu/%llu",
        jitterBuffer ? jitterBuffer->GetMinPacketCount() : 0,
        jitterBuffer ? jitterBuffer->GetAverageDelay() : 0,
        avgLate[0], avgLate[1], avgLate[2],
        static_cast<int>(m_congestionControl->GetAverageRTT() * 1000),
        static_cast<int>(m_congestionControl->GetMinimumRTT() * 1000),
        static_cast<int>(m_congestionControl->GetInflightDataSize()),
        static_cast<int>(m_congestionControl->GetCongestionWindow()),
        m_keyFingerprint[0], m_keyFingerprint[1], m_keyFingerprint[2], m_keyFingerprint[3],
        m_keyFingerprint[4], m_keyFingerprint[5], m_keyFingerprint[6], m_keyFingerprint[7],
        m_useMTProto2 ? " (MTProto2.0)" : "",
        m_lastSentSeq, m_lastRemoteAckSeq, m_lastRemoteSeq,
        m_sendLosses, m_recvLossCount, m_encoder ? m_encoder->GetPacketLoss() : 0,
        m_encoder ? (m_encoder->GetBitrate() / 1000) : 0,
        m_unsentStreamPackets.load(),
        m_outgoingStreams[0]->frameDuration, !m_incomingStreams.empty() ? m_incomingStreams[0]->frameDuration : 0,
        static_cast<unsigned long long>(m_stats.bytesSentMobile + m_stats.bytesSentWifi),
        static_cast<unsigned long long>(m_stats.bytesRecvdMobile + m_stats.bytesRecvdWifi));
    r += buffer;
    if (m_config.enableVideoSend)
    {
        std::shared_ptr<Stream> vstm = GetStreamByType(StreamType::VIDEO, true);
        if (vstm != nullptr && vstm->enabled && m_videoPacketSender)
        {
            std::snprintf(buffer, sizeof(buffer), "\nVideo out: %ux%u '%c%c%c%c' %u kbit", vstm->width, vstm->height, PRINT_FOURCC(vstm->codec), m_videoPacketSender->GetBitrate());
            r += buffer;
        }
    }
    if (!m_peerVideoDecoders.empty())
    {
        r += "\nPeer codecs: ";
        for (std::uint32_t codec : m_peerVideoDecoders)
        {
            std::snprintf(buffer, sizeof(buffer), "'%c%c%c%c' ", PRINT_FOURCC(codec));
            r += buffer;
        }
    }
    if (m_config.enableVideoReceive)
    {
        std::shared_ptr<Stream> vstm = GetStreamByType(StreamType::VIDEO, false);
        if (vstm != nullptr && vstm->enabled)
        {
            std::snprintf(buffer, sizeof(buffer), "\nVideo in: %ux%u '%c%c%c%c'", vstm->width, vstm->height, PRINT_FOURCC(vstm->codec));
            r += buffer;
        }
    }
    return r;
}

const char* VoIPController::GetVersion()
{
    return LIBTGVOIP_VERSION;
}

std::int64_t VoIPController::GetPreferredRelayID()
{
    return m_preferredRelay;
}

Error VoIPController::GetLastError()
{
    return m_lastError;
}

// Copies the accumulated traffic counters into |stats| (caller-owned).
void VoIPController::GetStats(TrafficStats* stats)
{
    std::memcpy(stats, &this->m_stats, sizeof(TrafficStats));
}

// Serializes call diagnostics (network info, per-endpoint stats, ...) to a
// JSON debug log.
std::string VoIPController::GetDebugLog()
{
    std::map<std::string, json11::Json> network {
        {"type", NetworkTypeToString(m_networkType)}};
    if (IS_MOBILE_NETWORK(m_networkType))
    {
        CellularCarrierInfo carrier = GetCarrierInfo();
        if (!carrier.name.empty())
        {
            network["carrier"] = carrier.name;
            network["country"] = carrier.countryCode;
            network["mcc"] = carrier.mcc;
            network["mnc"] = carrier.mnc;
        }
    }
    else if (m_networkType == NetType::WIFI)
    {
#ifdef __ANDROID__
        jni::DoWithJNI([&](JNIEnv* env) {
            jmethodID getWifiInfoMethod = env->GetStaticMethodID(jniUtilitiesClass, "getWifiInfo", "()[I");
            jintArray res = static_cast<jintArray>(env->CallStaticObjectMethod(jniUtilitiesClass, getWifiInfoMethod));
            if (res)
            {
                jint* wifiInfo = env->GetIntArrayElements(res, nullptr);
                network["rssi"] = wifiInfo[0];
                network["link_speed"] = wifiInfo[1];
                env->ReleaseIntArrayElements(res, wifiInfo, JNI_ABORT);
            }
        });
#endif
    }
    std::vector<json11::Json> endpointsJson;
    for (auto& [_, endpoint] : m_endpoints)
    {
        std::string type;
        std::map<std::string, json11::Json> je {
            { "rtt", static_cast<int>(endpoint.m_averageRTT * 1000) }
        };
        std::int64_t id = 0;
        if (endpoint.type == Endpoint::Type::UDP_RELAY)
        {
            je["type"] = endpoint.IsIPv6Only() ? "udp_relay6" : "udp_relay";
            id = endpoint.CleanID();
            if (endpoint.m_totalUdpPings == 0)
                je["udp_pings"] = 0.0;
            else
                je["udp_pings"] = static_cast<double>(endpoint.m_totalUdpPingReplies) / endpoint.m_totalUdpPings;
            je["self_rtt"] = static_cast<int>(endpoint.m_selfRtts.Average() * 1000);
        }
        else if (endpoint.type == Endpoint::Type::TCP_RELAY)
        {
            je["type"] = endpoint.IsIPv6Only() ?
"tcp_relay6" : "tcp_relay"; id = endpoint.CleanID(); } else if (endpoint.type == Endpoint::Type::UDP_P2P_INET) { je["type"] = endpoint.IsIPv6Only() ? "p2p_inet6" : "p2p_inet"; } else if (endpoint.type == Endpoint::Type::UDP_P2P_LAN) { je["type"] = "p2p_lan"; } if (m_preferredRelay == endpoint.id && m_wasEstablished) je["pref"] = true; if (id) { std::ostringstream s; s << id; je["id"] = s.str(); } endpointsJson.emplace_back(je); } std::string p2pType = "none"; Endpoint& cur = m_endpoints[m_currentEndpoint]; if (cur.type == Endpoint::Type::UDP_P2P_INET) p2pType = cur.IsIPv6Only() ? "inet6" : "inet"; else if (cur.type == Endpoint::Type::UDP_P2P_LAN) p2pType = "lan"; std::vector<std::string> problems; if (m_lastError == Error::TIMEOUT) problems.emplace_back("timeout"); if (m_wasReconnecting) problems.emplace_back("reconnecting"); if (m_wasExtraEC) problems.emplace_back("extra_ec"); if (m_wasEncoderLaggy) problems.emplace_back("encoder_lag"); if (!m_wasEstablished) problems.emplace_back("not_inited"); if (m_wasNetworkHandover) problems.emplace_back("network_handover"); return json11::Json(json11::Json::object { { "log_type", "call_stats" }, { "libtgvoip_version", LIBTGVOIP_VERSION }, { "network", network }, { "protocol_version", std::min(m_peerVersion, PROTOCOL_VERSION) }, { "udp_avail", m_udpConnectivityState == UdpState::AVAILABLE }, { "tcp_used", m_useTCP }, { "p2p_type", p2pType }, { "packet_stats", json11::Json::object { { "out", static_cast<int>(m_seq) }, { "in", static_cast<int>(m_packetsReceived) }, { "lost_out", static_cast<int>(m_congestionControl->GetSendLossCount()) }, { "lost_in", static_cast<int>(m_recvLossCount) } } }, { "endpoints", endpointsJson }, { "problems", problems } }).dump(); } std::vector<AudioInputDevice> VoIPController::EnumerateAudioInputs() { std::vector<AudioInputDevice> devs; audio::AudioInput::EnumerateDevices(devs); return devs; } std::vector<AudioOutputDevice> VoIPController::EnumerateAudioOutputs() { std::vector<AudioOutputDevice> 
devs; audio::AudioOutput::EnumerateDevices(devs); return devs; } void VoIPController::SetCurrentAudioInput(std::string id) { m_currentAudioInput = std::move(id); if (m_audioInput != nullptr) m_audioInput->SetCurrentDevice(m_currentAudioInput); } void VoIPController::SetCurrentAudioOutput(std::string id) { m_currentAudioOutput = std::move(id); if (m_audioOutput) m_audioOutput->SetCurrentDevice(m_currentAudioOutput); } std::string VoIPController::GetCurrentAudioInputID() const { return m_currentAudioInput; } std::string VoIPController::GetCurrentAudioOutputID() const { return m_currentAudioOutput; } void VoIPController::SetProxy(Proxy protocol, std::string address, std::uint16_t port, std::string username, std::string password) { m_proxyProtocol = protocol; m_proxyAddress = std::move(address); m_proxyPort = port; m_proxyUsername = std::move(username); m_proxyPassword = std::move(password); } int VoIPController::GetSignalBarsCount() { return m_signalBarsHistory.NonZeroAverage(); } void VoIPController::SetCallbacks(VoIPController::Callbacks callbacks) { m_callbacks = callbacks; if (callbacks.connectionStateChanged) callbacks.connectionStateChanged(this, m_state); } float VoIPController::GetOutputLevel() const { return 0.0f; } void VoIPController::SetAudioOutputGainControlEnabled(bool enabled) { LOGD("New output AGC state: %d", enabled); } std::uint32_t VoIPController::GetPeerCapabilities() { return m_peerCapabilities; } void VoIPController::SendGroupCallKey(std::uint8_t* key) { Buffer buf(256); buf.CopyFrom(key, 0, 256); std::shared_ptr<Buffer> keyPtr = std::make_shared<Buffer>(std::move(buf)); m_messageThread.Post([this, keyPtr] { if (!(m_peerCapabilities & TGVOIP_PEER_CAP_GROUP_CALLS)) { LOGE("Tried to send group call key but peer isn't capable of them"); return; } if (m_didSendGroupCallKey) { LOGE("Tried to send a group call key repeatedly"); return; } if (!m_isOutgoing) { LOGE("You aren't supposed to send group call key in an incoming call, use 
VoIPController::RequestCallUpgrade() instead"); return; } m_didSendGroupCallKey = true; SendExtra(*keyPtr, ExtraType::GROUP_CALL_KEY); }); } void VoIPController::RequestCallUpgrade() { m_messageThread.Post([this] { if (!(m_peerCapabilities & TGVOIP_PEER_CAP_GROUP_CALLS)) { LOGE("Tried to send group call key but peer isn't capable of them"); return; } if (m_didSendUpgradeRequest) { LOGE("Tried to send upgrade request repeatedly"); return; } if (m_isOutgoing) { LOGE("You aren't supposed to send an upgrade request in an outgoing call, generate an encryption key and use VoIPController::SendGroupCallKey instead"); return; } m_didSendUpgradeRequest = true; Buffer empty(0); SendExtra(empty, ExtraType::REQUEST_GROUP); }); } void VoIPController::SetEchoCancellationStrength(int strength) { m_echoCancellationStrength = strength; if (m_echoCanceller != nullptr) m_echoCanceller->SetAECStrength(strength); } #if defined(TGVOIP_USE_CALLBACK_AUDIO_IO) void VoIPController::SetAudioDataCallbacks(std::function<void(std::int16_t*, std::size_t)> input, std::function<void(std::int16_t*, std::size_t)> output, std::function<void(std::int16_t*, std::size_t)> preproc = nullptr) { m_audioInputDataCallback = std::move(input); m_audioOutputDataCallback = std::move(output); m_audioPreprocDataCallback = std::move(preproc); m_preprocDecoder = m_preprocDecoder ? 
m_preprocDecoder : opus_decoder_create(48000, 1, nullptr); } #endif State VoIPController::GetConnectionState() const { return m_state; } void VoIPController::SetConfig(const Config& cfg) { m_config = cfg; if (tgvoipLogFile) { fclose(tgvoipLogFile); tgvoipLogFile = nullptr; } if (!m_config.logFilePath.empty()) { #ifndef _WIN32 tgvoipLogFile = fopen(m_config.logFilePath.c_str(), "a"); #else if (_wfopen_s(&tgvoipLogFile, config.logFilePath.c_str(), L"a") != 0) { tgvoipLogFile = nullptr; } #endif tgvoip_log_file_write_header(tgvoipLogFile); } else { tgvoipLogFile = nullptr; } if (m_statsDump != nullptr) { std::fclose(m_statsDump); m_statsDump = nullptr; } if (!m_config.statsDumpFilePath.empty()) { #ifndef _WIN32 m_statsDump = fopen(m_config.statsDumpFilePath.c_str(), "w"); #else if (_wfopen_s(&statsDump, config.statsDumpFilePath.c_str(), L"w") != 0) { statsDump = nullptr; } #endif if (m_statsDump != nullptr) std::fprintf(m_statsDump, "Time\tRTT\tLRSeq\tLSSeq\tLASeq\tLostR\tLostS\tCWnd\tBitrate\tLoss%%\tJitter\tJDelay\tAJDelay\n"); } else { m_statsDump = nullptr; } UpdateDataSavingState(); UpdateAudioBitrateLimit(); } void VoIPController::SetPersistentState(const std::vector<std::uint8_t>& state) { using namespace json11; if (state.empty()) return; std::string jsonErr; std::string json = std::string(state.begin(), state.end()); Json _obj = Json::parse(json, jsonErr); if (!jsonErr.empty()) { LOGE("Error parsing persistable state: %s", jsonErr.c_str()); return; } Json::object obj = _obj.object_items(); if (obj.find("proxy") != obj.end()) { Json::object proxy = obj["proxy"].object_items(); m_lastTestedProxyServer = proxy["server"].string_value(); m_proxySupportsUDP = proxy["udp"].bool_value(); m_proxySupportsTCP = proxy["tcp"].bool_value(); } } std::vector<std::uint8_t> VoIPController::GetPersistentState() { using namespace json11; Json::object obj = Json::object { {"ver", 1}, }; if (m_proxyProtocol == Proxy::SOCKS5) { char pbuf[128]; std::snprintf(pbuf, sizeof(pbuf), 
"%s:%u", m_proxyAddress.c_str(), m_proxyPort); obj.insert({"proxy", Json::object {{"server", std::string(pbuf)}, {"udp", m_proxySupportsUDP}, {"tcp", m_proxySupportsTCP}}}); } std::string _jstr = Json(obj).dump(); const char* jstr = _jstr.c_str(); return std::vector<std::uint8_t>(jstr, jstr + strlen(jstr)); } void VoIPController::SetOutputVolume(float level) { m_outputVolume.SetLevel(level); } void VoIPController::SetInputVolume(float level) { m_inputVolume.SetLevel(level); } #if defined(__APPLE__) && TARGET_OS_OSX void VoIPController::SetAudioOutputDuckingEnabled(bool enabled) { macAudioDuckingEnabled = enabled; audio::AudioUnitIO* osxAudio = dynamic_cast<audio::AudioUnitIO*>(audioIO); if (osxAudio) { osxAudio->SetDuckingEnabled(enabled); } } #endif #pragma mark - Internal intialization void VoIPController::InitializeTimers() { m_initTimeoutID = m_messageThread.Post([this] { LOGW("Init timeout, disconnecting"); m_lastError = Error::TIMEOUT; SetState(State::FAILED); }, m_config.initTimeout); if (!m_config.statsDumpFilePath.empty()) { m_messageThread.Post([this] { if (m_statsDump != nullptr && m_incomingStreams.size() == 1) { std::shared_ptr<JitterBuffer>& jitterBuffer = m_incomingStreams[0]->jitterBuffer; std::fprintf(m_statsDump, "%.3f\t%.3f\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%.3f\t%.3f\t%.3f\n", GetCurrentTime() - m_connectionInitTime, m_endpoints.at(m_currentEndpoint).m_rtts[0], m_lastRemoteSeq, m_seq.load(), m_lastRemoteAckSeq, m_recvLossCount, m_congestionControl ? m_congestionControl->GetSendLossCount() : 0, m_congestionControl ? static_cast<int>(m_congestionControl->GetInflightDataSize()) : 0, m_encoder ? m_encoder->GetBitrate() : 0, m_encoder ? m_encoder->GetPacketLoss() : 0, jitterBuffer ? jitterBuffer->GetLastMeasuredJitter() : 0, jitterBuffer ? jitterBuffer->GetLastMeasuredDelay() * 0.06 : 0, jitterBuffer ? 
jitterBuffer->GetAverageDelay() * 0.06 : 0); } }, 0.1, 0.1); } m_messageThread.Post(std::bind(&VoIPController::SendRelayPings, this), 0.0, 2.0); } void VoIPController::RunSendThread() { InitializeAudio(); InitializeTimers(); m_messageThread.Post(std::bind(&VoIPController::SendInit, this)); while (true) { RawPendingOutgoingPacket pkt = m_rawSendQueue.GetBlocking(); if (pkt.packet.IsEmpty()) break; if (IS_MOBILE_NETWORK(m_networkType)) m_stats.bytesSentMobile += static_cast<std::uint64_t>(pkt.packet.data.Length()); else m_stats.bytesSentWifi += static_cast<std::uint64_t>(pkt.packet.data.Length()); if (pkt.packet.protocol == NetworkProtocol::TCP) { if (pkt.socket != nullptr && !pkt.socket->IsFailed()) { pkt.socket->Send(std::move(pkt.packet)); } } else { m_udpSocket->Send(std::move(pkt.packet)); } } LOGI("=== send thread exiting ==="); } #pragma mark - Miscellaneous void VoIPController::SetState(State state) { this->m_state = state; LOGV("Call state changed to %d", static_cast<int>(state)); m_stateChangeTime = GetCurrentTime(); m_messageThread.Post([this, state] { if (m_callbacks.connectionStateChanged) m_callbacks.connectionStateChanged(this, state); }); if (state == State::ESTABLISHED) { SetMicMute(m_micMuted); if (!m_wasEstablished) { m_wasEstablished = true; m_messageThread.Post(std::bind(&VoIPController::UpdateRTT, this), 0.1, 0.5); m_messageThread.Post(std::bind(&VoIPController::UpdateAudioBitrate, this), 0.0, 0.3); m_messageThread.Post(std::bind(&VoIPController::UpdateCongestion, this), 0.0, 1.0); m_messageThread.Post(std::bind(&VoIPController::UpdateSignalBars, this), 1.0, 1.0); m_messageThread.Post(std::bind(&VoIPController::TickJitterBufferAndCongestionControl, this), 0.0, 0.1); } } } void VoIPController::SendStreamFlags(Stream& stream) { ENFORCE_MSG_THREAD; BufferOutputStream s(5); s.WriteUInt8(stream.id); std::int32_t flags = 0; if (stream.enabled) flags |= STREAM_FLAG_ENABLED; if (stream.extraECEnabled) flags |= STREAM_FLAG_EXTRA_EC; if (stream.paused) 
flags |= STREAM_FLAG_PAUSED; s.WriteInt32(flags); LOGV("My stream state: id %u flags %u", stream.id, flags); Buffer buf(std::move(s)); SendExtra(buf, ExtraType::STREAM_FLAGS); } std::shared_ptr<VoIPController::Stream> VoIPController::GetStreamByType(StreamType type, bool outgoing) const { for (const std::shared_ptr<Stream>& ss : (outgoing ? m_outgoingStreams : m_incomingStreams)) if (ss->type == type) return ss; return std::shared_ptr<Stream>(); } std::shared_ptr<VoIPController::Stream> VoIPController::GetStreamByID(std::uint8_t id, bool outgoing) const { for (const std::shared_ptr<Stream>& ss : (outgoing ? m_outgoingStreams : m_incomingStreams)) if (ss->id == id) return ss; return std::shared_ptr<Stream>(); } CellularCarrierInfo VoIPController::GetCarrierInfo() { #if defined(__APPLE__) && TARGET_OS_IOS return DarwinSpecific::GetCarrierInfo(); #elif defined(__ANDROID__) CellularCarrierInfo carrier; jni::DoWithJNI([&carrier](JNIEnv* env) { jmethodID getCarrierInfoMethod = env->GetStaticMethodID(jniUtilitiesClass, "getCarrierInfo", "()[Ljava/lang/String;"); jobjectArray jinfo = (jobjectArray)env->CallStaticObjectMethod(jniUtilitiesClass, getCarrierInfoMethod); if (jinfo && env->GetArrayLength(jinfo) == 4) { carrier.name = jni::JavaStringToStdString(env, (jstring)env->GetObjectArrayElement(jinfo, 0)); carrier.countryCode = jni::JavaStringToStdString(env, (jstring)env->GetObjectArrayElement(jinfo, 1)); carrier.mcc = jni::JavaStringToStdString(env, (jstring)env->GetObjectArrayElement(jinfo, 2)); carrier.mnc = jni::JavaStringToStdString(env, (jstring)env->GetObjectArrayElement(jinfo, 3)); } else { LOGW("Failed to get carrier info"); } }); return carrier; #else return CellularCarrierInfo(); #endif } #pragma mark - Audio I/O void VoIPController::HandleAudioInput(std::uint8_t* data, std::size_t len, std::uint8_t* secondaryData, std::size_t secondaryLen) { if (m_stopping) return; // TODO make an AudioPacketSender bool hasSecondaryData = (secondaryLen != 0 && secondaryData != 
nullptr); Buffer dataBuf = m_outgoingAudioBufferPool.Get(); Buffer secondaryDataBuf = hasSecondaryData ? m_outgoingAudioBufferPool.Get() : Buffer(); dataBuf.CopyFrom(data, 0, len); if (hasSecondaryData) { secondaryDataBuf.CopyFrom(secondaryData, 0, secondaryLen); } std::shared_ptr<Buffer> dataBufPtr = std::make_shared<Buffer>(std::move(dataBuf)); std::shared_ptr<Buffer> secondaryDataBufPtr = std::make_shared<Buffer>(std::move(secondaryDataBuf)); m_messageThread.Post([this, dataBufPtr, secondaryDataBufPtr, len, secondaryLen]() { m_unsentStreamPacketsHistory.Add(m_unsentStreamPackets); if (m_unsentStreamPacketsHistory.Average() >= m_maxUnsentStreamPackets && m_videoPacketSender == nullptr) { LOGW("Resetting stalled send queue"); m_sendQueue.clear(); m_unsentStreamPacketsHistory.Reset(); m_unsentStreamPackets = 0; } if (m_waitingForAcks || m_dontSendPackets > 0 || (m_unsentStreamPackets >= m_maxUnsentStreamPackets /*&& endpoints[currentEndpoint].type==Endpoint::Type::TCP_RELAY*/)) { LOGV("waiting for queue, dropping outgoing audio packet, %d %d %d [%d]", m_unsentStreamPackets.load(), m_waitingForAcks, m_dontSendPackets, m_maxUnsentStreamPackets); return; } if (!m_receivedInitAck) return; BufferOutputStream pkt(1500); bool hasExtraFEC = m_peerVersion >= 7 && (secondaryLen != 0) && m_shittyInternetMode; std::uint8_t flags = static_cast<std::uint8_t>((len > 255 || hasExtraFEC) ? 
STREAM_DATA_FLAG_LEN16 : 0); pkt.WriteUInt8(1 | flags); // streamID + flags if (len > 255 || hasExtraFEC) { std::int16_t lenAndFlags = static_cast<std::int16_t>(len); if (hasExtraFEC) lenAndFlags |= STREAM_DATA_XFLAG_EXTRA_FEC; pkt.WriteInt16(lenAndFlags); } else { pkt.WriteUInt8(static_cast<std::uint8_t>(len)); } pkt.WriteUInt32(m_audioTimestampOut); pkt.WriteBytes(*dataBufPtr, 0, len); if (hasExtraFEC) { pkt.WriteUInt8(static_cast<std::uint8_t>(std::min(static_cast<int>(m_ecAudioPackets.size()), m_extraEcLevel))); for (auto ecData = m_ecAudioPackets.begin() + std::max(0, static_cast<int>(m_ecAudioPackets.size()) - m_extraEcLevel); ecData != m_ecAudioPackets.end(); ++ecData) { pkt.WriteUInt8(static_cast<std::uint8_t>(ecData->Length())); pkt.WriteBytes(*ecData); } Buffer ecBuf(secondaryLen); ecBuf.CopyFrom(**secondaryDataBufPtr, 0, secondaryLen); m_ecAudioPackets.emplace_back(std::move(ecBuf)); while (m_ecAudioPackets.size() > 4) m_ecAudioPackets.pop_front(); } ++m_unsentStreamPackets; std::size_t pktLength = pkt.GetLength(); PendingOutgoingPacket p { /*.seq=*/ GenerateOutSeq(), /*.type=*/ PktType::STREAM_DATA, /*.len=*/ pktLength, /*.data=*/ Buffer(std::move(pkt)), /*.endpoint=*/0, }; m_congestionControl->PacketSent(p.seq, p.len); SendOrEnqueuePacket(std::move(p)); if (m_peerVersion < 7 && secondaryLen != 0 && m_shittyInternetMode) { Buffer ecBuf(secondaryLen); ecBuf.CopyFrom(*secondaryDataBufPtr, 0, secondaryLen); m_ecAudioPackets.emplace_back(std::move(ecBuf)); while (m_ecAudioPackets.size() > 4) m_ecAudioPackets.pop_front(); pkt = BufferOutputStream(1500); pkt.WriteUInt8(m_outgoingStreams[0]->id); pkt.WriteUInt32(m_audioTimestampOut); pkt.WriteUInt8(static_cast<std::uint8_t>(std::min(static_cast<int>(m_ecAudioPackets.size()), m_extraEcLevel))); for (auto ecData = m_ecAudioPackets.begin() + std::max(0, static_cast<int>(m_ecAudioPackets.size()) - m_extraEcLevel); ecData != m_ecAudioPackets.end(); ++ecData) { 
pkt.WriteUInt8(static_cast<std::uint8_t>(ecData->Length())); pkt.WriteBytes(*ecData); } std::size_t pktLength = pkt.GetLength(); PendingOutgoingPacket p { GenerateOutSeq(), PktType::STREAM_EC, pktLength, Buffer(std::move(pkt)), 0 }; SendOrEnqueuePacket(std::move(p)); } m_audioTimestampOut += m_outgoingStreams[0]->frameDuration; }); #if defined(TGVOIP_USE_CALLBACK_AUDIO_IO) if (m_audioPreprocDataCallback && m_preprocDecoder) { int size = opus_decode(m_preprocDecoder, data, len, m_preprocBuffer, 4096, 0); m_audioPreprocDataCallback(m_preprocBuffer, size); } #endif } void VoIPController::InitializeAudio() { double t = GetCurrentTime(); std::shared_ptr<Stream> outgoingAudioStream = GetStreamByType(StreamType::AUDIO, true); LOGI("before create audio io"); m_audioIO = audio::AudioIO::Create(m_currentAudioInput, m_currentAudioOutput); m_audioInput = m_audioIO->GetInput(); m_audioOutput = m_audioIO->GetOutput(); #ifdef __ANDROID__ audio::AudioInputAndroid* androidInput = dynamic_cast<audio::AudioInputAndroid*>(audioInput); if (androidInput) { unsigned int effects = androidInput->GetEnabledEffects(); if (!(effects & audio::AudioInputAndroid::EFFECT_AEC)) { config.enableAEC = true; LOGI("Forcing software AEC because built-in is not good"); } if (!(effects & audio::AudioInputAndroid::EFFECT_NS)) { config.enableNS = true; LOGI("Forcing software NS because built-in is not good"); } } #elif defined(__APPLE__) && TARGET_OS_OSX SetAudioOutputDuckingEnabled(macAudioDuckingEnabled); #endif LOGI("AEC: %d NS: %d AGC: %d", m_config.enableAEC, m_config.enableNS, m_config.enableAGC); m_echoCanceller = new EchoCanceller(m_config.enableAEC, m_config.enableNS, m_config.enableAGC); m_encoder = new OpusEncoder(m_audioInput, true); m_encoder->SetCallback(std::bind(&VoIPController::HandleAudioInput, this, std::placeholders::_1, std::placeholders::_2, std::placeholders::_3, std::placeholders::_4)); m_encoder->SetOutputFrameDuration(outgoingAudioStream->frameDuration); 
m_encoder->SetEchoCanceller(m_echoCanceller); m_encoder->SetSecondaryEncoderEnabled(false); if (m_config.enableVolumeControl) { m_encoder->AddAudioEffect(&m_inputVolume); } #if defined(TGVOIP_USE_CALLBACK_AUDIO_IO) dynamic_cast<audio::AudioInputCallback*>(m_audioInput)->SetDataCallback(m_audioInputDataCallback); dynamic_cast<audio::AudioOutputCallback*>(m_audioOutput)->SetDataCallback(m_audioOutputDataCallback); #endif if (!m_audioOutput->IsInitialized()) { LOGE("Error initializing audio playback"); m_lastError = Error::AUDIO_IO; SetState(State::FAILED); return; } UpdateAudioBitrateLimit(); LOGI("Audio initialization took %f seconds", GetCurrentTime() - t); } void VoIPController::StartAudio() { OnAudioOutputReady(); m_encoder->Start(); if (!m_micMuted) { m_audioInput->Start(); if (!m_audioInput->IsInitialized()) { LOGE("Error initializing audio capture"); m_lastError = Error::AUDIO_IO; SetState(State::FAILED); return; } } } void VoIPController::OnAudioOutputReady() { LOGI("Audio I/O ready"); std::shared_ptr<Stream>& stream = m_incomingStreams[0]; stream->decoder = std::make_shared<OpusDecoder>(m_audioOutput, true, m_peerVersion >= 6); stream->decoder->SetEchoCanceller(m_echoCanceller); if (m_config.enableVolumeControl) { stream->decoder->AddAudioEffect(&m_outputVolume); } stream->decoder->SetJitterBuffer(stream->jitterBuffer); stream->decoder->SetFrameDuration(stream->frameDuration); stream->decoder->Start(); } void VoIPController::UpdateAudioOutputState() { bool areAnyAudioStreamsEnabled = false; for (const std::shared_ptr<Stream>& stream : m_incomingStreams) { if (stream->type == StreamType::AUDIO && stream->enabled) { areAnyAudioStreamsEnabled = true; break; } } if (m_audioOutput != nullptr) { LOGV("New audio output state: %d", areAnyAudioStreamsEnabled); if (m_audioOutput->IsPlaying() != areAnyAudioStreamsEnabled) { if (areAnyAudioStreamsEnabled) m_audioOutput->Start(); else m_audioOutput->Stop(); } } } #pragma mark - Bandwidth management void 
VoIPController::UpdateAudioBitrateLimit() { if (m_encoder != nullptr) { if (m_dataSavingMode || m_dataSavingRequestedByPeer) { m_maxBitrate = m_maxAudioBitrateSaving; m_encoder->SetBitrate(m_initAudioBitrateSaving); } else if (m_networkType == NetType::GPRS) { m_maxBitrate = m_maxAudioBitrateGPRS; m_encoder->SetBitrate(m_initAudioBitrateGPRS); } else if (m_networkType == NetType::EDGE) { m_maxBitrate = m_maxAudioBitrateEDGE; m_encoder->SetBitrate(m_initAudioBitrateEDGE); } else { m_maxBitrate = m_maxAudioBitrate; m_encoder->SetBitrate(m_initAudioBitrate); } m_encoder->SetVadMode(m_dataSavingMode || m_dataSavingRequestedByPeer); if (m_echoCanceller != nullptr) m_echoCanceller->SetVoiceDetectionEnabled(m_dataSavingMode || m_dataSavingRequestedByPeer); } } void VoIPController::UpdateDataSavingState() { if (m_config.dataSaving == DataSaving::ALWAYS) { m_dataSavingMode = true; } else if (m_config.dataSaving == DataSaving::MOBILE) { m_dataSavingMode = m_networkType == NetType::GPRS || m_networkType == NetType::EDGE || m_networkType == NetType::THREE_G || m_networkType == NetType::HSPA || m_networkType == NetType::LTE || m_networkType == NetType::OTHER_MOBILE; } else { m_dataSavingMode = false; } LOGI("update data saving mode, config %d, enabled %d, reqd by peer %d", static_cast<int>(m_config.dataSaving), m_dataSavingMode, m_dataSavingRequestedByPeer); } #pragma mark - Networking & crypto std::uint32_t VoIPController::GenerateOutSeq() { return m_seq++; } void VoIPController::WritePacketHeader(std::uint32_t pseq, BufferOutputStream* s, PktType type, std::uint32_t length, PacketSender* source) { std::uint32_t acks = 0; for (int i = 0; i < 32; ++i) { if (std::find(m_recentIncomingPackets.begin(), m_recentIncomingPackets.end(), m_lastRemoteSeq - static_cast<std::uint32_t>(i + 1)) != m_recentIncomingPackets.end()) { acks |= (1 << (31 - i)); } } if (m_peerVersion >= 8 || (m_peerVersion == 0 && m_connectionMaxLayer >= 92)) { s->WriteUInt8(static_cast<std::uint8_t>(type)); 
s->WriteUInt32(m_lastRemoteSeq); s->WriteUInt32(pseq); s->WriteUInt32(acks); std::uint8_t flags; if (m_currentExtras.empty()) { flags = 0; } else { flags = XPFLAG_HAS_EXTRA; } std::shared_ptr<Stream> videoStream = GetStreamByType(StreamType::VIDEO, false); if (m_peerVersion >= 9 && videoStream != nullptr && videoStream->enabled) flags |= XPFLAG_HAS_RECV_TS; s->WriteUInt8(flags); if (!m_currentExtras.empty()) { s->WriteUInt8(static_cast<std::uint8_t>(m_currentExtras.size())); for (UnacknowledgedExtraData& x : m_currentExtras) { LOGV("Writing extra into header: type %u, length %d", static_cast<std::uint8_t>(x.type), static_cast<int>(x.data.Length())); assert(x.data.Length() <= 254); s->WriteUInt8(static_cast<std::uint8_t>(x.data.Length() + 1)); s->WriteUInt8(static_cast<std::uint8_t>(x.type)); s->WriteBytes(*x.data, x.data.Length()); if (x.firstContainingSeq == 0) x.firstContainingSeq = pseq; } } if (m_peerVersion >= 9 && videoStream != nullptr && videoStream->enabled) { s->WriteUInt32(static_cast<std::uint32_t>((m_lastRecvPacketTime - m_connectionInitTime) * 1000)); } } else { if (m_state == State::WAIT_INIT || m_state == State::WAIT_INIT_ACK) { s->WriteUInt32(TLID_DECRYPTED_AUDIO_BLOCK); std::int64_t randomID; crypto.rand_bytes(reinterpret_cast<std::uint8_t*>(&randomID), 8); s->WriteInt64(randomID); std::uint8_t randBytes[7]; crypto.rand_bytes(randBytes, 7); s->WriteUInt8(7); s->WriteBytes(randBytes, 7); std::uint32_t pflags = PFLAG_HAS_RECENT_RECV | PFLAG_HAS_SEQ; if (length > 0) { pflags |= PFLAG_HAS_DATA; } if (m_state == State::WAIT_INIT || m_state == State::WAIT_INIT_ACK) { pflags |= PFLAG_HAS_CALL_ID | PFLAG_HAS_PROTO; } pflags |= (static_cast<std::uint32_t>(type)) << 24; s->WriteUInt32(pflags); if (pflags & PFLAG_HAS_CALL_ID) { s->WriteBytes(m_callID, 16); } s->WriteUInt32(m_lastRemoteSeq); s->WriteUInt32(pseq); s->WriteUInt32(acks); if (pflags & PFLAG_HAS_PROTO) { s->WriteInt32(PROTOCOL_NAME); } if (length > 0) { if (length <= 253) { 
s->WriteUInt8(static_cast<std::uint8_t>(length)); } else { s->WriteUInt8(254); s->WriteUInt8(static_cast<std::uint8_t>((length >> 0) & 0xFF)); s->WriteUInt8(static_cast<std::uint8_t>((length >> 8) & 0xFF)); s->WriteUInt8(static_cast<std::uint8_t>((length >> 16) & 0xFF)); } } } else { s->WriteUInt32(TLID_SIMPLE_AUDIO_BLOCK); std::int64_t randomID; crypto.rand_bytes(reinterpret_cast<std::uint8_t*>(&randomID), 8); s->WriteInt64(randomID); std::uint8_t randBytes[7]; crypto.rand_bytes(randBytes, 7); s->WriteUInt8(7); s->WriteBytes(randBytes, 7); std::uint32_t lenWithHeader = length + 13; if (lenWithHeader > 0) { if (lenWithHeader <= 253) { s->WriteUInt8(static_cast<std::uint8_t>(lenWithHeader)); } else { s->WriteUInt8(std::uint8_t{254}); s->WriteUInt8(static_cast<std::uint8_t>((lenWithHeader >> 0) & 0xFF)); s->WriteUInt8(static_cast<std::uint8_t>((lenWithHeader >> 8) & 0xFF)); s->WriteUInt8(static_cast<std::uint8_t>((lenWithHeader >> 16) & 0xFF)); } } s->WriteUInt8(static_cast<std::uint8_t>(type)); s->WriteUInt32(m_lastRemoteSeq); s->WriteUInt32(pseq); s->WriteUInt32(acks); if (m_peerVersion >= 6) { if (m_currentExtras.empty()) { s->WriteUInt8(0); } else { s->WriteUInt8(XPFLAG_HAS_EXTRA); s->WriteUInt8(static_cast<std::uint8_t>(m_currentExtras.size())); for (UnacknowledgedExtraData& x : m_currentExtras) { LOGV("Writing extra into header: type %u, length %d", static_cast<std::uint8_t>(x.type), static_cast<int>(x.data.Length())); assert(x.data.Length() <= 254); s->WriteUInt8(static_cast<std::uint8_t>(x.data.Length() + 1)); s->WriteUInt8(static_cast<std::uint8_t>(x.type)); s->WriteBytes(*x.data, x.data.Length()); if (x.firstContainingSeq == 0) x.firstContainingSeq = pseq; } } } } } m_unacknowledgedIncomingPacketCount = 0; m_recentOutgoingPackets.emplace_back(RecentOutgoingPacket { pseq, 0, GetCurrentTime(), 0.0, type, length, source, false }); while (m_recentOutgoingPackets.size() > MAX_RECENT_PACKETS) { m_recentOutgoingPackets.pop_front(); } m_lastSentSeq = pseq; } void 
VoIPController::SendInit()
{
    // Broadcast an INIT packet (protocol versions, capability flags and codec
    // lists) to every usable endpoint, then keep re-posting every 0.5 s until
    // the peer answers with INIT_ACK.
    ENFORCE_MSG_THREAD;
    std::uint32_t initSeq = GenerateOutSeq();
    for (auto& [_, endpoint] : m_endpoints)
    {
        // Skip TCP relays unless TCP transport is actually enabled.
        if (endpoint.type == Endpoint::Type::TCP_RELAY && !m_useTCP)
            continue;
        BufferOutputStream out(1024);
        out.WriteInt32(PROTOCOL_VERSION);
        out.WriteInt32(MIN_PROTOCOL_VERSION);
        std::uint32_t flags = 0;
        if (m_config.enableCallUpgrade)
            flags |= INIT_FLAG_GROUP_CALLS_SUPPORTED;
        if (m_config.enableVideoReceive)
            flags |= INIT_FLAG_VIDEO_RECV_SUPPORTED;
        if (m_config.enableVideoSend)
            flags |= INIT_FLAG_VIDEO_SEND_SUPPORTED;
        if (m_dataSavingMode)
            flags |= INIT_FLAG_DATA_SAVING_ENABLED;
        out.WriteUInt32(flags);
        if (m_connectionMaxLayer < 74)
        {
            // Legacy (pre-layer-74) fixed codec list layout.
            out.WriteUInt8(2); // audio codecs count
            out.WriteUInt8(CODEC_OPUS_OLD);
            out.WriteUInt8(0);
            out.WriteUInt8(0);
            out.WriteUInt8(0);
            out.WriteInt32(CODEC_OPUS);
            out.WriteUInt8(0); // video codecs count (decode)
            out.WriteUInt8(0); // video codecs count (encode)
        }
        else
        {
            out.WriteUInt8(std::uint8_t{1});
            out.WriteInt32(CODEC_OPUS);
            std::vector<std::uint32_t> decoders = m_config.enableVideoReceive ? video::VideoRenderer::GetAvailableDecoders() : std::vector<std::uint32_t>();
            std::vector<std::uint32_t> encoders = m_config.enableVideoSend ? video::VideoSource::GetAvailableEncoders() : std::vector<std::uint32_t>();
            // NOTE(review): "encoders" is gathered but never serialized below —
            // only the decoder list is sent; confirm this matches the wire format.
            out.WriteUInt8(static_cast<std::uint8_t>(decoders.size()));
            for (std::uint32_t id : decoders)
            {
                out.WriteUInt32(id);
            }
            if (m_connectionMaxLayer >= 92)
                out.WriteUInt8(static_cast<std::uint8_t>(video::VideoRenderer::GetMaximumResolution()));
            else
                out.WriteUInt8(std::uint8_t{0});
        }
        std::size_t outLength = out.GetLength();
        SendOrEnqueuePacket(PendingOutgoingPacket {
            /*.seq=*/ initSeq,
            /*.type=*/ PktType::INIT,
            /*.len=*/ outLength,
            /*.data=*/ Buffer(std::move(out)),
            /*.endpoint=*/endpoint.id });
    }
    if (m_state == State::WAIT_INIT)
        SetState(State::WAIT_INIT_ACK);
    // Retry until the INIT is acknowledged.
    m_messageThread.Post([this] {
        if (m_state == State::WAIT_INIT_ACK)
        {
            SendInit();
        }
    }, 0.5);
}

// Wraps the real UDP socket in a SOCKS5 UDP-association proxy. Falls back to
// direct UDP when this proxy server is already known not to support UDP.
void VoIPController::InitUDPProxy()
{
    if (m_realUdpSocket != m_udpSocket)
    {
        // Drop a previously created proxy wrapper before building a new one.
        m_udpSocket->Close();
        delete m_udpSocket;
        m_udpSocket = m_realUdpSocket;
    }
    char sbuf[128];
    std::snprintf(sbuf, sizeof(sbuf), "%s:%u", m_proxyAddress.c_str(), m_proxyPort);
    std::string proxyHostPort(sbuf);
    if (proxyHostPort == m_lastTestedProxyServer && !m_proxySupportsUDP)
    {
        LOGI("Proxy does not support UDP - using UDP directly instead");
        m_messageThread.Post(std::bind(&VoIPController::ResetUdpAvailability, this));
        return;
    }
    NetworkSocket* tcp = NetworkSocket::Create(NetworkProtocol::TCP);
    tcp->Connect(m_resolvedProxyAddress, m_proxyPort);
    std::list<NetworkSocket*> writeSockets;
    std::list<NetworkSocket*> readSockets;
    std::list<NetworkSocket*> errorSockets;
    // Block until the TCP control connection to the proxy becomes writable.
    while (!tcp->IsFailed() && !tcp->IsReadyToSend())
    {
        writeSockets.emplace_back(tcp);
        if (!NetworkSocket::Select(readSockets, writeSockets, errorSockets, m_selectCanceller))
        {
            LOGW("Select canceled while waiting for proxy control socket to connect");
            delete tcp;
            return;
        }
    }
    LOGV("UDP proxy control socket ready to send");
    NetworkSocketSOCKS5Proxy* udpProxy = new NetworkSocketSOCKS5Proxy(tcp, m_realUdpSocket, m_proxyUsername, m_proxyPassword);
    udpProxy->OnReadyToSend();
    writeSockets.clear();
    // Drive the SOCKS5 handshake until the proxy socket is usable (or fails).
    while (!udpProxy->IsFailed() && !tcp->IsFailed() && !udpProxy->IsReadyToSend())
    {
        readSockets.clear();
        errorSockets.clear();
        readSockets.emplace_back(tcp);
        errorSockets.emplace_back(tcp);
        if (!NetworkSocket::Select(readSockets, writeSockets, errorSockets, m_selectCanceller))
        {
            LOGW("Select canceled while waiting for UDP proxy to initialize");
            delete udpProxy;
            return;
        }
        if (!readSockets.empty())
            udpProxy->OnReadyToReceive();
    }
    LOGV("UDP proxy initialized");
    if (udpProxy->IsFailed())
    {
        udpProxy->Close();
        delete udpProxy;
        m_proxySupportsUDP = false; // remember the failure for the next attempt
    }
    else
    {
        m_udpSocket = udpProxy;
    }
    m_messageThread.Post(std::bind(&VoIPController::ResetUdpAvailability, this));
}

// Main network receive loop: multiplexes the UDP socket and all TCP relay
// sockets via Select() and forwards received packets to the message thread.
void VoIPController::RunRecvThread()
{
    LOGI("Receive thread starting");
    if (m_proxyProtocol == Proxy::SOCKS5)
    {
        m_resolvedProxyAddress = NetworkSocket::ResolveDomainName(m_proxyAddress);
        if (m_resolvedProxyAddress.IsEmpty())
        {
            LOGW("Error resolving proxy address %s", m_proxyAddress.c_str());
            SetState(State::FAILED);
            return;
        }
    }
    else
    {
        // No proxy: start probing UDP connectivity right away.
        m_udpConnectivityState = UdpState::PING_PENDING;
        m_udpPingTimeoutID = m_messageThread.Post(std::bind(&VoIPController::SendUdpPings, this), 0.0, 0.5);
    }
    while (m_runReceiver)
    {
        if (m_proxyProtocol == Proxy::SOCKS5 && m_needReInitUdpProxy)
        {
            InitUDPProxy();
            m_needReInitUdpProxy = false;
        }
        std::list<NetworkSocket*> readSockets;
        std::list<NetworkSocket*> errorSockets;
        std::list<NetworkSocket*> writeSockets;
        readSockets.emplace_back(m_udpSocket);
        errorSockets.emplace_back(m_realUdpSocket);
        if (!m_realUdpSocket->IsReadyToSend())
            writeSockets.emplace_back(m_realUdpSocket);
        {
            // Register every live TCP relay socket for read/error, and for
            // write when it still has pending data to flush.
            MutexGuard m(m_endpointsMutex);
            for (const auto& [_, endpoint] : m_endpoints)
            {
                if (endpoint.type == Endpoint::Type::TCP_RELAY)
                {
                    if (endpoint.m_socket != nullptr)
                    {
                        readSockets.emplace_back(endpoint.m_socket.get());
                        errorSockets.emplace_back(endpoint.m_socket.get());
                        if (!endpoint.m_socket->IsReadyToSend())
                        {
                            NetworkSocketSOCKS5Proxy* proxy = dynamic_cast<NetworkSocketSOCKS5Proxy*>(endpoint.m_socket.get());
                            if (proxy == nullptr ||
proxy->NeedSelectForSending())
                                writeSockets.emplace_back(endpoint.m_socket.get());
                        }
                    }
                }
            }
        }
        {
            bool selRes = NetworkSocket::Select(readSockets, writeSockets, errorSockets, m_selectCanceller);
            if (!selRes)
            {
                LOGV("Select canceled");
                continue;
            }
        }
        if (!m_runReceiver)
            return;
        if (!errorSockets.empty())
        {
            // A failure of the real UDP socket is fatal for the whole call.
            if (std::find(errorSockets.begin(), errorSockets.end(), m_realUdpSocket) != errorSockets.end())
            {
                LOGW("UDP socket failed");
                SetState(State::FAILED);
                return;
            }
            // Failed TCP relay sockets are closed; they get re-created on demand.
            MutexGuard m(m_endpointsMutex);
            for (NetworkSocket*& socket : errorSockets)
            {
                for (auto& [_, endpoint] : m_endpoints)
                {
                    if (endpoint.m_socket != nullptr && endpoint.m_socket.get() == socket)
                    {
                        endpoint.m_socket->Close();
                        endpoint.m_socket.reset();
                        LOGI("Closing failed TCP socket for %s:%u", endpoint.GetAddress().ToString().c_str(), endpoint.port);
                    }
                }
            }
            continue;
        }
        for (NetworkSocket*& socket : readSockets)
        {
            NetworkPacket packet = socket->Receive(0);
            if (packet.address.IsEmpty())
            {
                LOGE("Packet has null address. This shouldn't happen.");
                continue;
            }
            if (packet.data.IsEmpty())
            {
                LOGE("Packet has zero length.");
                continue;
            }
            // Hand the packet over to the message thread for processing.
            m_messageThread.Post(bind(&VoIPController::NetworkPacketReceived, this, std::make_shared<NetworkPacket>(std::move(packet))));
        }
        if (!writeSockets.empty())
        {
            m_messageThread.Post(std::bind(&VoIPController::TrySendQueuedPackets, this));
        }
    }
    LOGI("=== recv thread exiting ===");
}

// Flushes queued outgoing packets whose endpoints have become writable;
// packets for endpoints that no longer exist are dropped.
void VoIPController::TrySendQueuedPackets()
{
    ENFORCE_MSG_THREAD;
    for (auto opkt = m_sendQueue.begin(); opkt != m_sendQueue.end();)
    {
        Endpoint* endpoint = GetEndpointForPacket(*opkt);
        if (endpoint == nullptr)
        {
            opkt = m_sendQueue.erase(opkt);
            LOGE("SendQueue contained packet for nonexistent endpoint");
            continue;
        }
        bool canSend;
        if (endpoint->type != Endpoint::Type::TCP_RELAY)
            canSend = m_realUdpSocket->IsReadyToSend();
        else
            canSend = endpoint->m_socket && endpoint->m_socket->IsReadyToSend();
        if (canSend)
        {
            LOGI("Sending queued packet");
            SendOrEnqueuePacket(std::move(*opkt), false);
            opkt = m_sendQueue.erase(opkt);
        }
        else
        {
            ++opkt;
        }
    }
}

// Returns true when the outgoing packet with the given seq has been acked
// (ackTime is 0.0 until the peer acknowledges it).
bool VoIPController::WasOutgoingPacketAcknowledged(std::uint32_t seq)
{
    RecentOutgoingPacket* pkt = GetRecentOutgoingPacket(seq);
    if (pkt == nullptr)
        return false;
    return pkt->ackTime != 0.0;
}

// Linear scan of the recent-outgoing-packets history; nullptr when the seq
// is no longer (or was never) tracked.
VoIPController::RecentOutgoingPacket* VoIPController::GetRecentOutgoingPacket(std::uint32_t seq)
{
    for (RecentOutgoingPacket& opkt : m_recentOutgoingPackets)
    {
        if (opkt.seq == seq)
        {
            return &opkt;
        }
    }
    return nullptr;
}

// Attributes an incoming packet to a known endpoint (by address, port and
// transport) and dispatches it to ProcessIncomingPacket(); packets from
// unknown sources are dropped.
void VoIPController::NetworkPacketReceived(std::shared_ptr<NetworkPacket> _packet)
{
    ENFORCE_MSG_THREAD;
    NetworkPacket& packet = *_packet;
    std::int64_t srcEndpointID = 0;
    if (!packet.address.isIPv6)
    {
        // Exact IPv4 address/port/transport match against known endpoints.
        for (const auto& [_, endpoint] : m_endpoints)
        {
            if (endpoint.address == packet.address && endpoint.port == packet.port)
            {
                if ((endpoint.type != Endpoint::Type::TCP_RELAY && packet.protocol == NetworkProtocol::UDP) || (endpoint.type == Endpoint::Type::TCP_RELAY && packet.protocol == NetworkProtocol::TCP))
                {
                    srcEndpointID = endpoint.id;
                    break;
                }
            }
        }
        if (srcEndpointID == 0 && packet.protocol == NetworkProtocol::UDP)
        {
            // Tolerate a NATed peer whose source address matches the advertised
            // P2P endpoint in its /24 prefix, as long as no RTT sample exists yet.
            try
            {
                Endpoint& p2p = GetEndpointByType(Endpoint::Type::UDP_P2P_INET);
                if (p2p.m_rtts[0] == 0.0 && p2p.address.PrefixMatches(24, packet.address))
                {
                    LOGD("Packet source matches p2p endpoint partially: %s:%u", packet.address.ToString().c_str(), packet.port);
                    srcEndpointID = p2p.id;
                }
            }
            catch (const std::out_of_range& exception)
            {
                LOGW("No endpoint with type UDP_P2P_INET\nwhat():\n%s", exception.what());
            }
        }
    }
    else
    {
        // IPv6 endpoints are matched against their dedicated v6 address.
        for (const auto& [_, endpoint] : m_endpoints)
        {
            if (endpoint.v6address == packet.address && endpoint.port == packet.port && endpoint.IsIPv6Only())
            {
                if ((endpoint.type != Endpoint::Type::TCP_RELAY && packet.protocol == NetworkProtocol::UDP) || (endpoint.type == Endpoint::Type::TCP_RELAY && packet.protocol == NetworkProtocol::TCP))
                {
                    srcEndpointID = endpoint.id;
                    break;
                }
            }
        }
    }
    if (srcEndpointID == 0)
    {
        LOGW("Received a packet from unknown source %s:%u", packet.address.ToString().c_str(), packet.port);
        return;
    }
    if
(IS_MOBILE_NETWORK(m_networkType))
        m_stats.bytesRecvdMobile += static_cast<std::uint64_t>(packet.data.Length());
    else
        m_stats.bytesRecvdWifi += static_cast<std::uint64_t>(packet.data.Length());
    try
    {
        ProcessIncomingPacket(packet, m_endpoints.at(srcEndpointID));
    }
    catch (const std::out_of_range& exception)
    {
        LOGW("Error while parsing packet.\nwhat():\n%s", exception.what());
    }
}

// Handles out-of-band responses from a relay/reflector: self-address info
// (UDP ping replies) and peer-address info used for P2P hole punching.
void VoIPController::ProcessRelaySpecialRequest(BufferInputStream& in, Endpoint& srcEndpoint)
{
    in.Seek(16 + 12); // skip the 16-byte peer tag and the 12-byte 0xFF marker
    std::uint32_t tlid = in.ReadUInt32();
    switch (tlid)
    {
    case TLID_UDP_REFLECTOR_SELF_INFO:
    {
        if (!(srcEndpoint.type == Endpoint::Type::UDP_RELAY /*&& udpConnectivityState==Udp::PING_SENT*/ && in.Remaining() >= 32))
            break;
        std::int32_t date = in.ReadInt32();
        std::int64_t queryID = in.ReadInt64();
        std::uint8_t myIP[16];
        in.ReadBytes(myIP, 16);
        std::int16_t myPort = in.ReadInt16();
        double selfRTT = 0.0;
        ++srcEndpoint.m_udpPongCount;
        ++srcEndpoint.m_totalUdpPingReplies;
        // Match the reply to an outstanding ping to measure the self RTT.
        if (srcEndpoint.m_udpPingTimes.find(queryID) != srcEndpoint.m_udpPingTimes.end())
        {
            double sendTime = srcEndpoint.m_udpPingTimes[queryID];
            srcEndpoint.m_udpPingTimes.erase(queryID);
            srcEndpoint.m_selfRtts.Add(selfRTT = GetCurrentTime() - sendTime);
        }
        LOGV("Received UDP ping reply from %s:%d: date=%d, queryID=%ld, my IP=%s, my port=%d, selfRTT=%f", srcEndpoint.address.ToString().c_str(), srcEndpoint.port, date, static_cast<long>(queryID), NetworkAddress::IPv4(*reinterpret_cast<std::uint32_t*>(myIP + 12)).ToString().c_str(), myPort, selfRTT);
        if (srcEndpoint.IsIPv6Only() && !m_didSendIPv6Endpoint)
        {
            NetworkAddress realAddr = NetworkAddress::IPv6(myIP);
            if (realAddr == m_myIPv6)
            {
                // Directly reachable over IPv6 - advertise our endpoint once.
                LOGI("Public IPv6 matches local address");
                m_useIPv6 = true;
                if (m_allowP2p)
                {
                    m_didSendIPv6Endpoint = true;
                    BufferOutputStream o(18);
                    o.WriteBytes(myIP, 16);
                    o.WriteUInt16(m_udpSocket->GetLocalPort());
                    Buffer b(std::move(o));
                    SendExtra(b, ExtraType::IPV6_ENDPOINT);
                }
            }
        }
        break;
    }
    case TLID_UDP_REFLECTOR_PEER_INFO:
    {
        if (in.Remaining() < 16)
            break;
        std::uint32_t myAddr = in.ReadUInt32();
        std::uint16_t myPort = in.ReadUInt16();
        std::uint32_t peerAddr = in.ReadUInt32();
        std::uint16_t peerPort = in.ReadUInt16();
        constexpr std::int64_t p2pID = static_cast<std::int64_t>(FOURCC('P', '2', 'P', '4')) << 32;
        constexpr std::int64_t lanID = static_cast<std::int64_t>(FOURCC('L', 'A', 'N', '4')) << 32;
        // Fresh peer info invalidates previously known direct endpoints.
        if (m_currentEndpoint == p2pID || m_currentEndpoint == lanID)
            m_currentEndpoint = m_preferredRelay;
        if (m_endpoints.find(lanID) != m_endpoints.end())
        {
            MutexGuard m(m_endpointsMutex);
            m_endpoints.erase(lanID);
        }
        std::uint8_t peerTag[16]; // NOTE(review): left uninitialized here - confirm intended
        LOGW("Received reflector peer info, my=%s:%u, peer=%s:%u", NetworkAddress::IPv4(myAddr).ToString().c_str(), myPort, NetworkAddress::IPv4(peerAddr).ToString().c_str(), peerPort);
        if (m_waitingForRelayPeerInfo)
        {
            Endpoint p2p(p2pID, peerPort, NetworkAddress::IPv4(peerAddr), NetworkAddress::Empty(), Endpoint::Type::UDP_P2P_INET, peerTag);
            {
                MutexGuard m(m_endpointsMutex);
                m_endpoints[p2pID] = p2p;
            }
            if (myAddr == peerAddr)
            {
                // Same public address on both sides: likely the same LAN, so
                // also advertise our local interface address to the peer.
                LOGW("Detected LAN");
                NetworkAddress lanAddr = NetworkAddress::IPv4(0);
                m_udpSocket->GetLocalInterfaceInfo(&lanAddr, nullptr);
                BufferOutputStream pkt(8);
                pkt.WriteUInt32(lanAddr.addr.ipv4);
                pkt.WriteUInt16(m_udpSocket->GetLocalPort());
                if (m_peerVersion < 6)
                {
                    SendPacketReliably(PktType::LAN_ENDPOINT, pkt.GetBuffer(), pkt.GetLength(), 0.5, 10);
                }
                else
                {
                    Buffer buf(std::move(pkt));
                    SendExtra(buf, ExtraType::LAN_ENDPOINT);
                }
            }
            m_waitingForRelayPeerInfo = false;
        }
        break;
    }
    default:
    {
        LOGV("Received relay response with unknown tl id: 0x%08X", tlid);
        break;
    }
    }
}

// Decrypts, validates and dispatches one incoming datagram. This is the heart
// of the receive path: MTProto 1.0/2.0 decryption, sequence bookkeeping, ack
// processing and per-packet-type handling all happen here.
void VoIPController::ProcessIncomingPacket(NetworkPacket& packet, Endpoint& srcEndpoint)
{
    ENFORCE_MSG_THREAD;
    std::uint8_t* buffer = *packet.data;
    std::size_t len = packet.data.Length();
    BufferInputStream in(packet.data);
    bool hasPeerTag = false;
    if (m_peerVersion < 9 || srcEndpoint.type == Endpoint::Type::UDP_RELAY || srcEndpoint.type == Endpoint::Type::TCP_RELAY)
    {
        // Relay packets (and everything on old protocols) start with a 16-byte
        // tag: the relay peer tag, or the call ID for direct packets.
        if (std::memcmp(buffer, (srcEndpoint.type ==
Endpoint::Type::UDP_RELAY || srcEndpoint.type == Endpoint::Type::TCP_RELAY) ? reinterpret_cast<void*>(srcEndpoint.peerTag) : reinterpret_cast<void*>(m_callID), 16) != 0)
        {
            LOGW("Received packet has wrong peerTag");
            return;
        }
        in.Seek(16);
        hasPeerTag = true;
    }
    // A relay packet whose next 12 payload bytes are all 0xFF is a special
    // control response from the reflector, not call data.
    if (in.Remaining() >= 16 && (srcEndpoint.type == Endpoint::Type::UDP_RELAY || srcEndpoint.type == Endpoint::Type::TCP_RELAY)
        && *reinterpret_cast<const std::uint64_t*>(buffer + 16) == std::numeric_limits<std::uint64_t>::max()
        && *reinterpret_cast<const std::uint32_t*>(buffer + 24) == std::numeric_limits<std::uint32_t>::max())
    {
        // relay special request response
        ProcessRelaySpecialRequest(in, srcEndpoint);
        return;
    }
    if (in.Remaining() < 40)
    {
        LOGV("Received packet is too small");
        return;
    }
    bool retryWith2 = false;
    std::size_t innerLen = 0;
    bool shortFormat = m_peerVersion >= 8 || (m_peerVersion == 0 && m_connectionMaxLayer >= 92);
    if (!m_useMTProto2)
    {
        // Legacy MTProto 1.0 decryption path (SHA-1 based message hash).
        std::uint8_t fingerprint[8], msgHash[16];
        in.ReadBytes(fingerprint, 8);
        in.ReadBytes(msgHash, 16);
        std::uint8_t key[32], iv[32];
        KDF(msgHash, m_isOutgoing ? 8 : 0, key, iv);
        std::vector<std::uint8_t> aesOut(MSC_STACK_FALLBACK(in.Remaining(), 1500));
        if (in.Remaining() > aesOut.size())
            return;
        crypto.aes_ige_decrypt(buffer + in.GetOffset(), aesOut.data(), in.Remaining(), key, iv);
        BufferInputStream _in(aesOut.data(), in.Remaining());
        std::uint8_t sha[SHA1_LENGTH];
        std::uint32_t _len = _in.ReadUInt32();
        if (_len > _in.Remaining())
            _len = static_cast<std::uint32_t>(_in.Remaining());
        crypto.sha1(aesOut.data(), static_cast<std::size_t>(_len) + 4, sha);
        if (std::memcmp(msgHash, sha + (SHA1_LENGTH - 16), 16) != 0)
        {
            LOGW("Received packet has wrong hash after decryption");
            // During the handshake the peer may already be using MTProto 2.0.
            if (m_state == State::WAIT_INIT || m_state == State::WAIT_INIT_ACK)
                retryWith2 = true;
            else
                return;
        }
        else
        {
            std::memcpy(buffer + in.GetOffset(), aesOut.data(), in.Remaining());
            in.ReadInt32();
        }
    }
    if (m_useMTProto2 || retryWith2)
    {
        // MTProto 2.0 decryption path (SHA-256 based msg_key).
        if (hasPeerTag)
            in.Seek(16); // peer tag
        std::uint8_t fingerprint[8], msgKey[16];
        if (!shortFormat)
        {
            in.ReadBytes(fingerprint, 8);
            if (std::memcmp(fingerprint, m_keyFingerprint, 8) != 0)
            {
                LOGW("Received packet has wrong key fingerprint");
                return;
            }
        }
        in.ReadBytes(msgKey, 16);
        std::uint8_t decrypted[1500];
        std::uint8_t aesKey[32], aesIv[32];
        KDF2(msgKey, m_isOutgoing ? 8 : 0, aesKey, aesIv);
        std::size_t decryptedLen = in.Remaining();
        if (decryptedLen > sizeof(decrypted))
            return;
        if (decryptedLen % 16 != 0)
        {
            LOGW("wrong decrypted length");
            return;
        }
        crypto.aes_ige_decrypt(*packet.data + in.GetOffset(), decrypted, decryptedLen, aesKey, aesIv);
        in = BufferInputStream(decrypted, decryptedLen);
        std::size_t sizeSize = shortFormat ? 0 : 4;
        // Verify msg_key against SHA-256 of key material + plaintext.
        BufferOutputStream buf(decryptedLen + 32);
        std::size_t x = m_isOutgoing ? 8 : 0;
        buf.WriteBytes(m_encryptionKey + 88 + x, 32);
        buf.WriteBytes(decrypted + sizeSize, decryptedLen - sizeSize);
        std::uint8_t msgKeyLarge[32];
        crypto.sha256(buf.GetBuffer(), buf.GetLength(), msgKeyLarge);
        if (std::memcmp(msgKey, msgKeyLarge + 8, 16) != 0)
        {
            LOGW("Received packet has wrong hash");
            return;
        }
        innerLen = (shortFormat ? in.ReadUInt16() : in.ReadUInt32());
        if (innerLen > decryptedLen - sizeSize)
        {
            LOGW("Received packet has wrong inner length (%d with total of %u)", static_cast<int>(innerLen), static_cast<unsigned int>(decryptedLen));
            return;
        }
        if (decryptedLen - innerLen < (shortFormat ? 16 : 12))
        {
            LOGW("Received packet has too little padding (%u)", static_cast<unsigned int>(decryptedLen - innerLen));
            return;
        }
        std::memcpy(buffer, decrypted + (shortFormat ? 2 : 4), innerLen);
        in = BufferInputStream(buffer, innerLen);
        if (retryWith2)
        {
            LOGD("Successfully decrypted packet in MTProto2.0 fallback, upgrading");
            m_useMTProto2 = true;
        }
    }
    m_lastRecvPacketTime = GetCurrentTime();
    if (m_state == State::RECONNECTING)
    {
        LOGI("Received a valid packet while reconnecting - setting state to established");
        SetState(State::ESTABLISHED);
    }
    if (srcEndpoint.type == Endpoint::Type::UDP_P2P_INET && !srcEndpoint.IsIPv6Only())
    {
        // Follow the peer when its NAT mapping (address/port) changes.
        if (srcEndpoint.port != packet.port || srcEndpoint.address != packet.address)
        {
            if (!packet.address.isIPv6)
            {
                LOGI("Incoming packet was decrypted successfully, changing P2P endpoint to %s:%u", packet.address.ToString().c_str(), packet.port);
                srcEndpoint.address = packet.address;
                srcEndpoint.port = packet.port;
            }
        }
    }
    std::uint32_t ackId, pseq, acks;
    PktType type;
    std::uint8_t pflags;
    std::size_t packetInnerLen = 0;
    if (shortFormat)
    {
        // Compact fixed-layout header used by newer protocol versions.
        type = static_cast<PktType>(in.ReadUInt8());
        ackId = in.ReadUInt32();
        pseq = in.ReadUInt32();
        acks = in.ReadUInt32();
        pflags = in.ReadUInt8();
        packetInnerLen = innerLen - 14;
    }
    else
    {
        // Legacy TL-serialized packet headers.
        std::uint32_t tlid = in.ReadUInt32();
        switch (tlid)
        {
        case TLID_DECRYPTED_AUDIO_BLOCK:
        {
            in.ReadInt64(); // random id
            std::int32_t randLen = in.ReadTlLength();
            in.Seek(in.GetOffset() + static_cast<std::size_t>(randLen + pad4(randLen)));
            std::uint32_t flags = in.ReadUInt32();
            type = static_cast<PktType>((flags >> 24) & 0xFF);
            if (!(flags & PFLAG_HAS_SEQ && flags & PFLAG_HAS_RECENT_RECV))
            {
                LOGW("Received packet doesn't have PFlag::HAS_SEQ, PFlag::HAS_RECENT_RECV, or both");
                return;
            }
            if (flags & PFLAG_HAS_CALL_ID)
            {
                std::uint8_t pktCallID[16];
                in.ReadBytes(pktCallID, 16);
                if (std::memcmp(pktCallID, m_callID, 16) != 0)
                {
                    LOGW("Received packet has wrong call id");
                    m_lastError = Error::UNKNOWN;
                    SetState(State::FAILED);
                    return;
                }
            }
            ackId = in.ReadUInt32();
            pseq = in.ReadUInt32();
            acks = in.ReadUInt32();
            if (flags & PFLAG_HAS_PROTO)
            {
                std::uint32_t proto = in.ReadUInt32();
                if (proto != PROTOCOL_NAME)
                {
                    LOGW("Received packet uses wrong protocol");
                    m_lastError = Error::INCOMPATIBLE;
                    SetState(State::FAILED);
                    return;
                }
            }
            if (flags & PFLAG_HAS_EXTRA)
            {
                int extraLen = in.ReadTlLength();
                in.Seek(in.GetOffset() + static_cast<std::size_t>(extraLen + pad4(extraLen)));
            }
            if (flags & PFLAG_HAS_DATA)
            {
                packetInnerLen = static_cast<std::size_t>(in.ReadTlLength());
            }
            pflags = 0;
            break;
        }
        case TLID_SIMPLE_AUDIO_BLOCK:
        {
            in.ReadInt64(); // random id
            int randLen = in.ReadTlLength();
            in.Seek(in.GetOffset() + static_cast<std::size_t>(randLen + pad4(randLen)));
            packetInnerLen = static_cast<std::size_t>(in.ReadTlLength());
            type = static_cast<PktType>(in.ReadUInt8());
            ackId = in.ReadUInt32();
            pseq = in.ReadUInt32();
            acks = in.ReadUInt32();
            if (m_peerVersion >= 6)
                pflags = in.ReadUInt8();
            else
                pflags = 0;
            break;
        }
        default:
        {
            LOGW("Received a packet of unknown type %08X", tlid);
            return;
        }
        }
    }
    ++m_packetsReceived;
    // Drop duplicates and packets too old to matter.
    if (seqgt(pseq, m_lastRemoteSeq - MAX_RECENT_PACKETS))
    {
        if (std::find(m_recentIncomingPackets.begin(), m_recentIncomingPackets.end(), pseq) != m_recentIncomingPackets.end())
        {
            LOGW("Received duplicated packet for seq %u", pseq);
            return;
        }
        m_recentIncomingPackets.emplace_back(pseq);
        while (m_recentIncomingPackets.size() >
MAX_RECENT_PACKETS)
            m_recentIncomingPackets.pop_front();
        if (seqgt(pseq, m_lastRemoteSeq))
            m_lastRemoteSeq = pseq;
    }
    else
    {
        LOGW("Packet %u is out of order and too late", pseq);
        return;
    }
    if (pflags & XPFLAG_HAS_EXTRA)
    {
        // Extras piggy-backed onto this packet: length-prefixed records.
        std::uint8_t extraCount = in.ReadUInt8();
        for (int i = 0; i < extraCount; i++)
        {
            std::size_t extraLen = in.ReadUInt8();
            Buffer xbuffer(extraLen);
            in.ReadBytes(*xbuffer, extraLen);
            ProcessExtraData(xbuffer);
        }
    }
    std::uint32_t recvTS = 0;
    if (pflags & XPFLAG_HAS_RECV_TS)
    {
        recvTS = in.ReadUInt32();
    }
    if (seqgt(ackId, m_lastRemoteAckSeq))
    {
        if (m_waitingForAcks && m_lastRemoteAckSeq >= m_firstSentPing)
        {
            // The peer is alive again - resume sending after a short cooldown.
            m_RTTHistory.Reset();
            m_waitingForAcks = false;
            m_dontSendPackets = 10;
            m_messageThread.Post([this] {
                m_dontSendPackets = 0;
            }, 1.0);
            LOGI("resuming sending");
        }
        // Expand the 32-bit ack bitmask into explicit sequence numbers.
        std::vector<std::uint32_t> peerAcks;
        m_lastRemoteAckSeq = ackId;
        m_congestionControl->PacketAcknowledged(ackId);
        peerAcks.emplace_back(ackId);
        for (unsigned int i = 0; i < 32; ++i)
        {
            if ((acks >> (31 - i)) & 1)
            {
                peerAcks.emplace_back(ackId - (i + 1));
            }
        }
        for (RecentOutgoingPacket& opkt : m_recentOutgoingPackets)
        {
            if (opkt.ackTime != 0.0)
                continue;
            if (std::find(peerAcks.begin(), peerAcks.end(), opkt.seq) != peerAcks.end())
            {
                opkt.ackTime = GetCurrentTime();
                if (opkt.lost)
                {
                    LOGW("acknowledged lost packet %u", opkt.seq);
                    --m_sendLosses;
                }
                if (opkt.sender != nullptr && !opkt.lost)
                { // don't report lost packets as acknowledged to PacketSenders
                    opkt.sender->PacketAcknowledged(opkt.seq, opkt.sendTime, recvTS / 1000.0, opkt.type, opkt.size);
                }
                // TODO move this to a PacketSender
                m_congestionControl->PacketAcknowledged(opkt.seq);
            }
        }
        if (m_peerVersion < 6)
        {
            // Legacy reliable-packet queue: drop queued packets the peer acked.
            std::size_t index = 0;
            for (auto it = m_queuedPackets.begin(); it != m_queuedPackets.end();)
            {
                QueuedPacket& qp = *it;
                bool didAck = false;
                for (std::size_t j = 0; j < qp.seqs.Size(); ++j)
                {
                    LOGD("queued packet %u, seq %u=%u", static_cast<unsigned>(index), static_cast<unsigned int>(j), qp.seqs[j]);
                    if (qp.seqs[j] == 0)
                        break;
                    int remoteAcksIndex = static_cast<int>(m_lastRemoteAckSeq - qp.seqs[j]);
                    if (seqgt(m_lastRemoteAckSeq, qp.seqs[j]) && remoteAcksIndex >= 0 && remoteAcksIndex < 32)
                    {
                        for (RecentOutgoingPacket& opkt : m_recentOutgoingPackets)
                        {
                            if (opkt.seq == qp.seqs[j] && opkt.ackTime > 0)
                            {
                                LOGD("did ack seq %u, removing", qp.seqs[j]);
                                didAck = true;
                                break;
                            }
                        }
                        if (didAck)
                            break;
                    }
                }
                if (didAck)
                {
                    it = m_queuedPackets.erase(it);
                }
                else
                {
                    ++it;
                    ++index;
                }
            }
        }
        else
        {
            // Modern path: retire outgoing extras once their first containing
            // packet has been acknowledged by the peer.
            for (auto x = m_currentExtras.begin(); x != m_currentExtras.end();)
            {
                if (x->firstContainingSeq != 0 && (m_lastRemoteAckSeq == x->firstContainingSeq || seqgt(m_lastRemoteAckSeq, x->firstContainingSeq)))
                {
                    LOGV("Peer acknowledged extra type %u length %u", static_cast<std::uint8_t>(x->type), static_cast<unsigned int>(x->data.Length()));
                    ProcessAcknowledgedOutgoingExtra(*x);
                    x = m_currentExtras.erase(x);
                    continue;
                }
                ++x;
            }
        }
    }
    Endpoint* currentEndpoint = &m_endpoints.at(m_currentEndpoint);
    if ( srcEndpoint.id != m_currentEndpoint && (srcEndpoint.type == Endpoint::Type::UDP_RELAY || srcEndpoint.type == Endpoint::Type::TCP_RELAY) && ((currentEndpoint->type != Endpoint::Type::UDP_RELAY && currentEndpoint->type != Endpoint::Type::TCP_RELAY) || currentEndpoint->m_averageRTT == 0))
    {
        // Too many un-acked packets on the current (direct) path: switch to
        // the relay this packet arrived through.
        if (seqgt(m_lastSentSeq - 32, m_lastRemoteAckSeq))
        {
            m_currentEndpoint = srcEndpoint.id;
            currentEndpoint = &srcEndpoint;
            LOGI("Peer network address probably changed, switching to relay");
            if (m_allowP2p)
                SendPublicEndpointsRequest();
        }
    }
    if (m_config.logPacketStats)
    {
        // Ring of per-packet debug records, trimmed in batches of 500.
        DebugLoggedPacket dpkt = {
            static_cast<std::int32_t>(pseq),
            GetCurrentTime() - m_connectionInitTime,
            static_cast<std::int32_t>(packet.data.Length())
        };
        m_debugLoggedPackets.emplace_back(dpkt);
        if (m_debugLoggedPackets.size() >= 2500)
        {
            m_debugLoggedPackets.erase(m_debugLoggedPackets.begin(), m_debugLoggedPackets.begin() + 500);
        }
    }
    ++m_unacknowledgedIncomingPacketCount;
    if (m_unacknowledgedIncomingPacketCount > m_unackNopThreshold)
    {
        // Nothing outgoing for a while - send a NOP so our acks reach the peer.
        SendNopPacket();
    }
#ifdef LOG_PACKETS
    LOGV("Received: from=%s:%u, seq=%u, length=%u, type=%s", srcEndpoint.GetAddress().ToString().c_str(), srcEndpoint.port, pseq, (unsigned int)packet.data.Length(), GetPacketTypeString(type).c_str());
#endif
    switch (type)
    {
    case PktType::NOP:
        LOGE("Received packet of NOP type");
        break;
    case PktType::UPDATE_STREAMS:
        LOGE("Received packet of UPDATE_STREAMS type");
        break;
    case PktType::SWITCH_TO_P2P:
        LOGE("Received packet of SWITCH_TO_P2P type");
        break;
    case PktType::SWITCH_PREF_RELAY:
        LOGE("Received packet of SWITCH_PREF_RELAY type");
        break;
    case PktType::INIT:
    {
        LOGD("Received init");
        std::int32_t ver = in.ReadInt32();
        if (!m_receivedInit)
            m_peerVersion = ver;
        LOGI("Peer version is %d", m_peerVersion);
        std::uint32_t minVer = in.ReadUInt32();
        if (minVer > PROTOCOL_VERSION || m_peerVersion < MIN_PROTOCOL_VERSION)
        {
            m_lastError = Error::INCOMPATIBLE;
            SetState(State::FAILED);
            return;
        }
        std::uint32_t flags = in.ReadUInt32();
        if (!m_receivedInit)
        {
            if (flags & INIT_FLAG_DATA_SAVING_ENABLED)
            {
                m_dataSavingRequestedByPeer = true;
                UpdateDataSavingState();
                UpdateAudioBitrateLimit();
            }
            if (flags & INIT_FLAG_GROUP_CALLS_SUPPORTED)
            {
                m_peerCapabilities |= TGVOIP_PEER_CAP_GROUP_CALLS;
            }
            if (flags & INIT_FLAG_VIDEO_RECV_SUPPORTED)
            {
                m_peerCapabilities |= TGVOIP_PEER_CAP_VIDEO_DISPLAY;
            }
            if (flags & INIT_FLAG_VIDEO_SEND_SUPPORTED)
            {
                m_peerCapabilities |= TGVOIP_PEER_CAP_VIDEO_CAPTURE;
            }
        }
        std::uint8_t numSupportedAudioCodecs = in.ReadUInt8();
        for (int i = 0; i < numSupportedAudioCodecs; ++i)
        {
            if (m_peerVersion < 5)
                in.ReadUInt8(); // ignore for now
            else
                in.ReadInt32();
        }
        if (!m_receivedInit && ((flags & INIT_FLAG_VIDEO_SEND_SUPPORTED && m_config.enableVideoReceive) || (flags & INIT_FLAG_VIDEO_RECV_SUPPORTED && m_config.enableVideoSend)))
        {
            LOGD("Peer video decoders:");
            std::uint8_t numSupportedVideoDecoders = in.ReadUInt8();
            for (int i = 0; i < numSupportedVideoDecoders; ++i)
            {
                std::uint32_t id = in.ReadUInt32();
                m_peerVideoDecoders.emplace_back(id);
                char* _id = reinterpret_cast<char*>(&id);
LOGD("%c%c%c%c", _id[3], _id[2], _id[1], _id[0]);
            }
            m_protocolInfo.maxVideoResolution = static_cast<InitVideoRes>(in.ReadUInt8());
            SetupOutgoingVideoStream();
        }
        // Reply with an INIT_ACK describing our outgoing streams.
        BufferOutputStream out(1024);
        out.WriteInt32(PROTOCOL_VERSION);
        out.WriteInt32(MIN_PROTOCOL_VERSION);
        out.WriteUInt8(static_cast<std::uint8_t>(m_outgoingStreams.size()));
        for (const std::shared_ptr<Stream>& stream : m_outgoingStreams)
        {
            out.WriteUInt8(stream->id);
            out.WriteUInt8(static_cast<std::uint8_t>(stream->type));
            if (m_peerVersion < 5)
                out.WriteUInt8(static_cast<std::uint8_t>(stream->codec == CODEC_OPUS ? CODEC_OPUS_OLD : 0));
            else
                out.WriteUInt32(stream->codec);
            out.WriteUInt16(stream->frameDuration);
            out.WriteUInt8(stream->enabled ? 1 : 0);
        }
        LOGI("Sending init ack");
        std::size_t outLength = out.GetLength();
        SendOrEnqueuePacket(PendingOutgoingPacket {
            /*.seq=*/ GenerateOutSeq(),
            /*.type=*/ PktType::INIT_ACK,
            /*.len=*/ outLength,
            /*.data=*/ Buffer(std::move(out)),
            /*.endpoint=*/0 });
        if (!m_receivedInit)
        {
            m_receivedInit = true;
            // Prefer the relay this INIT arrived through when it is usable.
            if ((srcEndpoint.type == Endpoint::Type::UDP_RELAY && m_udpConnectivityState != UdpState::BAD && m_udpConnectivityState != UdpState::NOT_AVAILABLE) || srcEndpoint.type == Endpoint::Type::TCP_RELAY)
            {
                m_currentEndpoint = srcEndpoint.id;
                if (srcEndpoint.type == Endpoint::Type::UDP_RELAY || (m_useTCP && srcEndpoint.type == Endpoint::Type::TCP_RELAY))
                    m_preferredRelay = srcEndpoint.id;
            }
        }
        if (!m_audioStarted && m_receivedInitAck)
        {
            StartAudio();
            m_audioStarted = true;
        }
        break;
    }
    case PktType::INIT_ACK:
    {
        LOGD("Received init ack");
        if (m_receivedInitAck)
            break;
        m_receivedInitAck = true;
        m_messageThread.Cancel(m_initTimeoutID);
        m_initTimeoutID = MessageThread::INVALID_ID;
        if (packetInnerLen > 10)
        {
            m_peerVersion = in.ReadInt32();
            std::uint32_t minVer = in.ReadUInt32();
            if (minVer > PROTOCOL_VERSION || m_peerVersion < MIN_PROTOCOL_VERSION)
            {
                m_lastError = Error::INCOMPATIBLE;
                SetState(State::FAILED);
                return;
            }
        }
        else
        {
            // Ancient peers do not echo their version in the INIT_ACK.
            m_peerVersion = 1;
        }
        LOGI("peer version from init ack %d", m_peerVersion);
        std::uint8_t streamCount = in.ReadUInt8();
        if (streamCount == 0)
            return;
        std::shared_ptr<Stream> incomingAudioStream = nullptr;
        for (int i = 0; i < streamCount; ++i)
        {
            std::shared_ptr<Stream> stream = std::make_shared<Stream>();
            stream->id = in.ReadUInt8();
            std::uint8_t type = in.ReadUInt8();
            if (m_peerVersion < 5)
            {
                std::uint8_t codec = in.ReadUInt8();
                if (codec == CODEC_OPUS_OLD)
                    stream->codec = CODEC_OPUS;
            }
            else
            {
                stream->codec = in.ReadUInt32();
            }
            in.ReadInt16();
            stream->frameDuration = 60;
            stream->enabled = in.ReadUInt8() == 1;
            if (type == static_cast<std::uint8_t>(StreamType::VIDEO) && m_peerVersion < 9)
            {
                stream->type = StreamType::VIDEO;
                LOGV("Skipping video stream for old protocol version");
                continue;
            }
            if (type == static_cast<std::uint8_t>(StreamType::AUDIO))
            {
                stream->type = StreamType::AUDIO;
                stream->jitterBuffer = std::make_shared<JitterBuffer>(nullptr, stream->frameDuration);
                // Initial jitter-buffer depth depends on the frame duration.
                if (stream->frameDuration > 50)
                    stream->jitterBuffer->SetMinPacketCount(static_cast<std::uint32_t>(ServerConfig::GetSharedInstance()->GetInt("jitter_initial_delay_60", 2)));
                else if (stream->frameDuration > 30)
                    stream->jitterBuffer->SetMinPacketCount(static_cast<std::uint32_t>(ServerConfig::GetSharedInstance()->GetInt("jitter_initial_delay_40", 4)));
                else
                    stream->jitterBuffer->SetMinPacketCount(static_cast<std::uint32_t>(ServerConfig::GetSharedInstance()->GetInt("jitter_initial_delay_20", 6)));
                stream->decoder = nullptr;
            }
            else if (type == static_cast<std::uint8_t>(StreamType::VIDEO))
            {
                stream->type = StreamType::VIDEO;
                if (!stream->packetReassembler)
                {
                    stream->packetReassembler = std::make_shared<PacketReassembler>();
                    stream->packetReassembler->SetCallback(bind(&VoIPController::ProcessIncomingVideoFrame, this, std::placeholders::_1, std::placeholders::_2, std::placeholders::_3, std::placeholders::_4));
                }
            }
            else
            {
                LOGW("Unknown incoming stream type: %u", type);
                continue;
            }
            m_incomingStreams.emplace_back(stream);
            if (stream->type == StreamType::AUDIO && !incomingAudioStream)
                incomingAudioStream = stream;
        }
        if (incomingAudioStream == nullptr)
            return;
        if (m_peerVersion >= 5 && !m_useMTProto2)
        {
            m_useMTProto2 = true;
            LOGD("MTProto2 wasn't initially enabled for whatever reason but peer supports it; upgrading");
        }
        if (!m_audioStarted && m_receivedInit)
        {
            StartAudio();
            m_audioStarted = true;
        }
        m_messageThread.Post([this] {
            if (m_state == State::WAIT_INIT_ACK)
            {
                SetState(State::ESTABLISHED);
            }
        }, ServerConfig::GetSharedInstance()->GetDouble("established_delay_if_no_stream_data", 1.5));
        if (m_allowP2p)
            SendPublicEndpointsRequest();
        break;
    }
    case PktType::STREAM_DATA:
    case PktType::STREAM_DATA_X2:
    case PktType::STREAM_DATA_X3:
    {
        if (!m_receivedFirstStreamPacket)
        {
            m_receivedFirstStreamPacket = true;
            if (m_state != State::ESTABLISHED && m_receivedInitAck)
            {
                m_messageThread.Post([this]() {
                    SetState(State::ESTABLISHED);
                }, 0.5);
                LOGW("First audio packet - setting state to ESTABLISHED");
            }
        }
        // X2/X3 packets carry two/three stream frames back to back.
        int count;
        switch (type)
        {
        case PktType::STREAM_DATA:
            count = 1;
            break;
        case PktType::STREAM_DATA_X2:
            count = 2;
            break;
        case PktType::STREAM_DATA_X3:
            count = 3;
            break;
        default:
            assert(false);
            break;
        }
        if (srcEndpoint.type == Endpoint::Type::UDP_RELAY && srcEndpoint.id != m_peerPreferredRelay)
        {
            m_peerPreferredRelay = srcEndpoint.id;
        }
        for (int i = 0; i < count; ++i)
        {
            std::uint8_t streamID = in.ReadUInt8();
            std::uint8_t flags = streamID & 0xC0;
            streamID &= 0x3F;
            std::uint16_t sdlen = (flags & STREAM_DATA_FLAG_LEN16 ?
in.ReadUInt16() : in.ReadUInt8()); std::uint32_t pts = in.ReadUInt32(); std::uint8_t fragmentCount = 1; std::uint8_t fragmentIndex = 0; m_audioTimestampIn = pts; if (!m_audioOutStarted && m_audioOutput != nullptr) { MutexGuard m(m_audioIOMutex); m_audioOutput->Start(); m_audioOutStarted = true; } bool fragmented = static_cast<bool>(sdlen & STREAM_DATA_XFLAG_FRAGMENTED); bool extraFEC = static_cast<bool>(sdlen & STREAM_DATA_XFLAG_EXTRA_FEC); bool keyframe = static_cast<bool>(sdlen & STREAM_DATA_XFLAG_KEYFRAME); if (fragmented) { fragmentIndex = in.ReadUInt8(); fragmentCount = in.ReadUInt8(); } sdlen &= 0x7FF; if (in.GetOffset() + sdlen > len) { return; } std::shared_ptr<Stream> stream; for (std::shared_ptr<Stream>& ss : m_incomingStreams) { if (ss->id == streamID) { stream = ss; break; } } if (stream == nullptr) { LOGW("received packet for unknown stream %u", static_cast<unsigned int>(streamID)); } else { switch (stream->type) { case StreamType::AUDIO: { if (stream->jitterBuffer == nullptr) break; stream->jitterBuffer->HandleInput(reinterpret_cast<const std::uint8_t*>(buffer + in.GetOffset()), sdlen, pts, false); if (extraFEC) { in.Seek(in.GetOffset() + sdlen); std::uint8_t fecCount = in.ReadUInt8(); for (unsigned int j = 0; j < fecCount; ++j) { std::uint8_t dlen = in.ReadUInt8(); std::uint8_t data[256]; in.ReadBytes(data, dlen); stream->jitterBuffer->HandleInput(data, dlen, pts - (fecCount - j) * stream->frameDuration, true); } } } case StreamType::VIDEO: { if (stream->packetReassembler == nullptr) break; std::uint8_t frameSeq = in.ReadUInt8(); Buffer pdata(sdlen); std::uint16_t rotation = 0; if (fragmentIndex == 0) { VideoRotation rotationEnum = static_cast<VideoRotation>(in.ReadUInt8() & std::uint8_t{VIDEO_ROTATION_MASK}); switch (rotationEnum) { case VideoRotation::_0: rotation = 0; break; case VideoRotation::_90: rotation = 90; break; case VideoRotation::_180: rotation = 180; break; case VideoRotation::_270: rotation = 270; break; // default: // unreachable on 
sane CPUs // std::abort(); } } pdata.CopyFrom(buffer + in.GetOffset(), 0, sdlen); stream->packetReassembler->AddFragment(std::move(pdata), fragmentIndex, fragmentCount, pts, frameSeq, keyframe, rotation); } } } if (i < count - 1) in.Seek(in.GetOffset() + sdlen); } break; } case PktType::PING: { if (srcEndpoint.type != Endpoint::Type::UDP_RELAY && srcEndpoint.type != Endpoint::Type::TCP_RELAY && !m_allowP2p) { LOGW("Received p2p ping but p2p is disabled by manual override"); return; } BufferOutputStream pkt(128); pkt.WriteUInt32(pseq); std::size_t pktLength = pkt.GetLength(); SendOrEnqueuePacket(PendingOutgoingPacket { /*.seq=*/ GenerateOutSeq(), /*.type=*/ PktType::PONG, /*.len=*/ pktLength, /*.data=*/ Buffer(std::move(pkt)), /*.endpoint=*/srcEndpoint.id, }); break; } case PktType::PONG: { if (packetInnerLen < 4) break; std::uint32_t pingSeq = in.ReadUInt32(); #ifdef LOG_PACKETS LOGD("Received pong for ping in seq %u", pingSeq); #endif if (pingSeq == srcEndpoint.m_lastPingSeq) { srcEndpoint.m_rtts.Add(GetCurrentTime() - srcEndpoint.m_lastPingTime); srcEndpoint.m_averageRTT = srcEndpoint.m_rtts.NonZeroAverage(); LOGD("Current RTT via %s: %.3f, average: %.3f", packet.address.ToString().c_str(), srcEndpoint.m_rtts[0], srcEndpoint.m_averageRTT); if (srcEndpoint.m_averageRTT > m_rateMaxAcceptableRTT) m_needRate = true; } break; } case PktType::STREAM_STATE: { std::uint8_t id = in.ReadUInt8(); std::uint8_t enabled = in.ReadUInt8(); LOGV("Peer stream state: id %u flags %u", id, enabled); for (std::shared_ptr<Stream>& stream : m_incomingStreams) { if (stream->id == id) { stream->enabled = enabled == 1; UpdateAudioOutputState(); break; } } break; } case PktType::LAN_ENDPOINT: { LOGV("received lan endpoint"); std::uint32_t peerAddr = in.ReadUInt32(); std::uint16_t peerPort = in.ReadUInt16(); constexpr std::int64_t lanID = static_cast<std::int64_t>(FOURCC('L', 'A', 'N', '4')) << 32; std::uint8_t peerTag[16]; Endpoint lan(lanID, peerPort, NetworkAddress::IPv4(peerAddr), 
NetworkAddress::Empty(), Endpoint::Type::UDP_P2P_LAN, peerTag); if (m_currentEndpoint == lanID) m_currentEndpoint = m_preferredRelay; MutexGuard m(m_endpointsMutex); m_endpoints[lanID] = lan; break; } case PktType::NETWORK_CHANGED: { if (!(currentEndpoint->type != Endpoint::Type::UDP_RELAY && currentEndpoint->type != Endpoint::Type::TCP_RELAY)) break; m_currentEndpoint = m_preferredRelay; if (m_allowP2p) SendPublicEndpointsRequest(); if (m_peerVersion >= 2) { std::uint32_t flags = in.ReadUInt32(); m_dataSavingRequestedByPeer = (flags & INIT_FLAG_DATA_SAVING_ENABLED) == INIT_FLAG_DATA_SAVING_ENABLED; UpdateDataSavingState(); UpdateAudioBitrateLimit(); ResetEndpointPingStats(); } break; } case PktType::STREAM_EC: { std::uint8_t streamID = in.ReadUInt8(); if (m_peerVersion < 7) { std::uint32_t lastTimestamp = in.ReadUInt32(); std::uint8_t count = in.ReadUInt8(); for (std::shared_ptr<Stream>& stream : m_incomingStreams) { if (stream->id == streamID) { for (unsigned int i = 0; i < count; ++i) { std::uint8_t dlen = in.ReadUInt8(); std::uint8_t data[256]; in.ReadBytes(data, dlen); if (stream->jitterBuffer != nullptr) { stream->jitterBuffer->HandleInput(data, dlen, lastTimestamp - (count - i - 1) * stream->frameDuration, true); } } break; } } } else { std::shared_ptr<Stream> stream = GetStreamByID(streamID, false); if (stream == nullptr) { LOGW("Received FEC packet for unknown stream %u", streamID); return; } if (stream->type != StreamType::VIDEO) { LOGW("Received FEC packet for non-video stream %u", streamID); return; } if (stream->packetReassembler == nullptr) return; std::uint8_t fseq = in.ReadUInt8(); std::uint8_t fecScheme = in.ReadUInt8(); std::uint8_t prevFrameCount = in.ReadUInt8(); std::uint16_t fecLen = in.ReadUInt16(); if (fecLen > in.Remaining()) return; Buffer fecData(fecLen); in.ReadBytes(fecData); stream->packetReassembler->AddFEC(std::move(fecData), fseq, prevFrameCount, fecScheme); } break; } } } void VoIPController::ProcessExtraData(Buffer& data) { 
BufferInputStream in(*data, data.Length()); ExtraType type = static_cast<ExtraType>(in.ReadUInt8()); alignas(8) std::uint8_t fullHash[SHA1_LENGTH]; crypto.sha1(*data, data.Length(), fullHash); std::uint64_t hash = *reinterpret_cast<std::uint64_t*>(fullHash); if (m_lastReceivedExtrasByType[type] == hash) { return; } LOGE("ProcessExtraData"); m_lastReceivedExtrasByType[type] = hash; switch (type) { case ExtraType::STREAM_FLAGS: { std::uint8_t id = in.ReadUInt8(); std::uint32_t flags = in.ReadUInt32(); LOGV("Peer stream state: id %u flags %u", id, flags); for (std::shared_ptr<Stream>& s : m_incomingStreams) { if (s->id == id) { bool prevEnabled = s->enabled; bool prevPaused = s->paused; s->enabled = (flags & STREAM_FLAG_ENABLED) == STREAM_FLAG_ENABLED; s->paused = (flags & STREAM_FLAG_PAUSED) == STREAM_FLAG_PAUSED; if (flags & STREAM_FLAG_EXTRA_EC) { if (!s->extraECEnabled) { s->extraECEnabled = true; if (s->jitterBuffer) s->jitterBuffer->SetMinPacketCount(4); } } else { if (s->extraECEnabled) { s->extraECEnabled = false; if (s->jitterBuffer) s->jitterBuffer->SetMinPacketCount(2); } } if (prevEnabled != s->enabled && s->type == StreamType::VIDEO && m_videoRenderer) m_videoRenderer->SetStreamEnabled(s->enabled); if (prevPaused != s->paused && s->type == StreamType::VIDEO && m_videoRenderer) m_videoRenderer->SetStreamPaused(s->paused); UpdateAudioOutputState(); break; } } break; } case ExtraType::STREAM_CSD: { LOGI("Received codec specific data"); std::uint8_t streamID = in.ReadUInt8(); for (std::shared_ptr<Stream>& stream : m_incomingStreams) { if (stream->id == streamID) { stream->codecSpecificData.clear(); stream->csdIsValid = false; stream->width = static_cast<unsigned int>(in.ReadUInt16()); stream->height = static_cast<unsigned int>(in.ReadUInt16()); std::size_t count = in.ReadUInt8(); for (std::size_t i = 0; i < count; i++) { std::size_t len = in.ReadUInt8(); Buffer csd(len); in.ReadBytes(*csd, len); stream->codecSpecificData.emplace_back(std::move(csd)); } break; 
} } break; } case ExtraType::LAN_ENDPOINT: { if (!m_allowP2p) return; LOGV("received lan endpoint (extra)"); std::uint32_t peerAddr = in.ReadUInt32(); std::uint16_t peerPort = in.ReadUInt16(); constexpr std::int64_t lanID = static_cast<std::int64_t>(FOURCC('L', 'A', 'N', '4')) << 32; if (m_currentEndpoint == lanID) m_currentEndpoint = m_preferredRelay; std::uint8_t peerTag[16]; Endpoint lan(lanID, peerPort, NetworkAddress::IPv4(peerAddr), NetworkAddress::Empty(), Endpoint::Type::UDP_P2P_LAN, peerTag); MutexGuard m(m_endpointsMutex); m_endpoints[lanID] = lan; break; } case ExtraType::NETWORK_CHANGED: { LOGI("Peer network changed"); m_wasNetworkHandover = true; const Endpoint& _currentEndpoint = m_endpoints.at(m_currentEndpoint); if (_currentEndpoint.type != Endpoint::Type::UDP_RELAY && _currentEndpoint.type != Endpoint::Type::TCP_RELAY) m_currentEndpoint = m_preferredRelay; if (m_allowP2p) SendPublicEndpointsRequest(); std::uint32_t flags = in.ReadUInt32(); m_dataSavingRequestedByPeer = (flags & INIT_FLAG_DATA_SAVING_ENABLED) == INIT_FLAG_DATA_SAVING_ENABLED; UpdateDataSavingState(); UpdateAudioBitrateLimit(); ResetEndpointPingStats(); break; } case ExtraType::GROUP_CALL_KEY: { if (!m_didReceiveGroupCallKey && !m_didSendGroupCallKey) { std::uint8_t groupKey[256]; in.ReadBytes(groupKey, 256); m_messageThread.Post([this, &groupKey] { if (m_callbacks.groupCallKeyReceived) m_callbacks.groupCallKeyReceived(this, groupKey); }); m_didReceiveGroupCallKey = true; } break; } case ExtraType::REQUEST_GROUP: { if (!m_didInvokeUpgradeCallback) { m_messageThread.Post([this] { if (m_callbacks.upgradeToGroupCallRequested) m_callbacks.upgradeToGroupCallRequested(this); }); m_didInvokeUpgradeCallback = true; } break; } case ExtraType::IPV6_ENDPOINT: { if (!m_allowP2p) return; std::uint8_t _addr[16]; in.ReadBytes(_addr, 16); NetworkAddress addr = NetworkAddress::IPv6(_addr); std::uint16_t port = in.ReadUInt16(); m_peerIPv6Available = true; LOGV("Received peer IPv6 endpoint [%s]:%u", 
addr.ToString().c_str(), port); constexpr std::int64_t p2pID = static_cast<std::int64_t>(FOURCC('P', '2', 'P', '6')) << 32; Endpoint ep; ep.type = Endpoint::Type::UDP_P2P_INET; ep.port = port; ep.v6address = addr; ep.id = p2pID; m_endpoints[p2pID] = ep; if (!m_myIPv6.IsEmpty()) m_currentEndpoint = p2pID; break; } } } void VoIPController::ProcessAcknowledgedOutgoingExtra(UnacknowledgedExtraData& extra) { if (extra.type == ExtraType::GROUP_CALL_KEY) { if (!m_didReceiveGroupCallKeyAck) { m_didReceiveGroupCallKeyAck = true; m_messageThread.Post([this] { if (m_callbacks.groupCallKeySent) m_callbacks.groupCallKeySent(this); }); } } } Endpoint& VoIPController::GetRemoteEndpoint() { return m_endpoints.at(m_currentEndpoint); } Endpoint* VoIPController::GetEndpointForPacket(const PendingOutgoingPacket& pkt) { Endpoint* endpoint = nullptr; if (pkt.endpoint != 0) { try { endpoint = &m_endpoints.at(pkt.endpoint); } catch (const std::out_of_range& exception) { LOGW("Unable to send packet via nonexistent endpoint %" PRIu64 "\nwhat():\n%s", pkt.endpoint, exception.what()); return nullptr; } } if (endpoint == nullptr) endpoint = &m_endpoints.at(m_currentEndpoint); return endpoint; } bool VoIPController::SendOrEnqueuePacket(PendingOutgoingPacket pkt, bool enqueue, PacketSender* source) { ENFORCE_MSG_THREAD; Endpoint* endpoint = GetEndpointForPacket(pkt); if (endpoint == nullptr) { std::abort(); return false; } bool canSend; if (endpoint->type != Endpoint::Type::TCP_RELAY) { canSend = m_realUdpSocket->IsReadyToSend(); } else { if (endpoint->m_socket == nullptr) { LOGV("Connecting to %s:%u", endpoint->GetAddress().ToString().c_str(), endpoint->port); if (m_proxyProtocol == Proxy::NONE) { endpoint->m_socket = std::make_shared<NetworkSocketTCPObfuscated>(NetworkSocket::Create(NetworkProtocol::TCP)); endpoint->m_socket->Connect(endpoint->GetAddress(), endpoint->port); } else if (m_proxyProtocol == Proxy::SOCKS5) { NetworkSocket* tcp = NetworkSocket::Create(NetworkProtocol::TCP); 
                tcp->Connect(m_resolvedProxyAddress, m_proxyPort);
                std::shared_ptr<NetworkSocketSOCKS5Proxy> proxy = std::make_shared<NetworkSocketSOCKS5Proxy>(tcp, nullptr, m_proxyUsername, m_proxyPassword);
                endpoint->m_socket = proxy;
                endpoint->m_socket->Connect(endpoint->GetAddress(), endpoint->port);
            }
            m_selectCanceller->CancelSelect();
        }
        canSend = endpoint->m_socket && endpoint->m_socket->IsReadyToSend();
    }
    if (!canSend)
    {
        if (enqueue)
        {
            LOGW("Not ready to send - enqueueing");
            m_sendQueue.emplace_back(std::move(pkt));
        }
        return false;
    }
    // Only actually serialize/send if the transport matching this endpoint
    // type is currently enabled.
    if ((endpoint->type == Endpoint::Type::TCP_RELAY && m_useTCP) || (endpoint->type != Endpoint::Type::TCP_RELAY && m_useUDP))
    {
        BufferOutputStream p(1500);
        WritePacketHeader(pkt.seq, &p, pkt.type, static_cast<std::uint32_t>(pkt.len), source);
        p.WriteBytes(pkt.data);
        SendPacket(p.GetBuffer(), p.GetLength(), *endpoint, pkt);
        if (pkt.type == PktType::STREAM_DATA)
        {
            --m_unsentStreamPackets;
        }
    }
    return true;
}

// Encrypts one serialized packet (MTProto 2.0 when m_useMTProto2, otherwise
// the legacy SHA1-based scheme) and hands it to the raw send queue.
// `data`/`len` is the plaintext; `srcPacket` is used for logging only.
void VoIPController::SendPacket(std::uint8_t* data, std::size_t len, Endpoint& ep, PendingOutgoingPacket& srcPacket)
{
    if (m_stopping)
        return;
    if (ep.type == Endpoint::Type::TCP_RELAY && !m_useTCP)
        return;
    BufferOutputStream out(len + 128);
    // Relays are addressed by peer tag; direct connections to older peers
    // (< v9) are prefixed with the call ID instead.
    if (ep.type == Endpoint::Type::UDP_RELAY || ep.type == Endpoint::Type::TCP_RELAY)
        out.WriteBytes(ep.peerTag, 16);
    else if (m_peerVersion < 9)
        out.WriteBytes(m_callID, 16);
    if (len > 0)
    {
        if (m_useMTProto2)
        {
            BufferOutputStream inner(len + 128);
            // Newer peers/layers use a 16-bit length that IS included in the
            // msg_key computation (sizeSize == 0); older ones use 32 bits
            // excluded from it.
            std::size_t sizeSize;
            if (m_peerVersion >= 8 || (!m_peerVersion && m_connectionMaxLayer >= 92))
            {
                inner.WriteUInt16(static_cast<std::uint16_t>(len));
                sizeSize = 0;
            }
            else
            {
                inner.WriteUInt32(static_cast<std::uint32_t>(len));
                out.WriteBytes(m_keyFingerprint, 8);
                sizeSize = 4;
            }
            inner.WriteBytes(data, len);
            // Pad to a 16-byte AES block boundary; always at least one
            // padding byte (a 16-remainder gets a full extra block).
            std::size_t padLen = 16 - inner.GetLength() % 16;
            if (padLen < 16)
                padLen += 16;
            std::uint8_t padding[32];
            crypto.rand_bytes(padding, padLen);
            inner.WriteBytes(padding, padLen);
            assert(inner.GetLength() % 16 == 0);
            std::uint8_t key[32], iv[32], msgKey[16];
            BufferOutputStream buf(len + 32);
            std::size_t x = m_isOutgoing ? 0 : 8;
            buf.WriteBytes(m_encryptionKey + 88 + x, 32);
            buf.WriteBytes(inner.GetBuffer() + sizeSize, inner.GetLength() - sizeSize);
            std::uint8_t msgKeyLarge[32];
            crypto.sha256(buf.GetBuffer(), buf.GetLength(), msgKeyLarge);
            // msg_key is the middle 16 bytes of SHA256(key fragment ++ payload).
            std::memcpy(msgKey, msgKeyLarge + 8, 16);
            KDF2(msgKey, m_isOutgoing ? 0 : 8, key, iv);
            out.WriteBytes(msgKey, 16);
            std::vector<std::uint8_t> aesOut(MSC_STACK_FALLBACK(inner.GetLength(), 1500));
            crypto.aes_ige_encrypt(inner.GetBuffer(), aesOut.data(), inner.GetLength(), key, iv);
            out.WriteBytes(aesOut.data(), inner.GetLength());
        }
        else
        {
            // Legacy scheme: SHA1 over length+payload, key derived via KDF().
            BufferOutputStream inner(len + 128);
            inner.WriteUInt32(static_cast<std::uint32_t>(len));
            inner.WriteBytes(data, len);
            if (inner.GetLength() % 16 != 0)
            {
                std::size_t padLen = 16 - inner.GetLength() % 16;
                std::uint8_t padding[16];
                crypto.rand_bytes(padding, padLen);
                inner.WriteBytes(padding, padLen);
            }
            assert(inner.GetLength() % 16 == 0);
            std::uint8_t key[32], iv[32], msgHash[SHA1_LENGTH];
            crypto.sha1(inner.GetBuffer(), len + 4, msgHash);
            out.WriteBytes(m_keyFingerprint, 8);
            out.WriteBytes((msgHash + (SHA1_LENGTH - 16)), 16);
            KDF(msgHash + (SHA1_LENGTH - 16), m_isOutgoing ? 0 : 8, key, iv);
            std::vector<std::uint8_t> aesOut(MSC_STACK_FALLBACK(inner.GetLength(), 1500));
            crypto.aes_ige_encrypt(inner.GetBuffer(), aesOut.data(), inner.GetLength(), key, iv);
            out.WriteBytes(aesOut.data(), inner.GetLength());
        }
    }
#ifdef LOG_PACKETS
    LOGV("Sending: to=%s:%u, seq=%u, length=%u, type=%s", ep.GetAddress().ToString().c_str(), ep.port, srcPacket.seq, (unsigned int)out.GetLength(), GetPacketTypeString(srcPacket.type).c_str());
#endif
    m_rawSendQueue.Put(RawPendingOutgoingPacket {
        NetworkPacket {
            Buffer(std::move(out)),
            ep.GetAddress(),
            ep.port,
            ep.type == Endpoint::Type::TCP_RELAY ? NetworkProtocol::TCP : NetworkProtocol::UDP },
        ep.type == Endpoint::Type::TCP_RELAY ?
        ep.m_socket : nullptr });
}

// Hands one already-encrypted packet to the socket layer and accounts the
// sent bytes against the mobile/wifi statistics counters.
void VoIPController::ActuallySendPacket(NetworkPacket pkt, Endpoint& ep)
{
    if (IS_MOBILE_NETWORK(m_networkType))
        m_stats.bytesSentMobile += static_cast<std::uint64_t>(pkt.data.Length());
    else
        m_stats.bytesSentWifi += static_cast<std::uint64_t>(pkt.data.Length());
    if (ep.type == Endpoint::Type::TCP_RELAY)
    {
        // Silently drops the packet if the TCP socket is gone/failed.
        if (ep.m_socket != nullptr && !ep.m_socket->IsFailed())
        {
            ep.m_socket->Send(std::move(pkt));
        }
    }
    else
    {
        m_udpSocket->Send(std::move(pkt));
    }
}

// Human-readable name of a network type (used in logs/debug output).
std::string VoIPController::NetworkTypeToString(NetType type)
{
    switch (type)
    {
    case NetType::WIFI:
        return "wifi";
    case NetType::GPRS:
        return "gprs";
    case NetType::EDGE:
        return "edge";
    case NetType::THREE_G:
        return "3g";
    case NetType::HSPA:
        return "hspa";
    case NetType::LTE:
        return "lte";
    case NetType::ETHERNET:
        return "ethernet";
    case NetType::OTHER_HIGH_SPEED:
        return "other_high_speed";
    case NetType::OTHER_LOW_SPEED:
        return "other_low_speed";
    case NetType::DIALUP:
        return "dialup";
    case NetType::OTHER_MOBILE:
        return "other_mobile";
    case NetType::UNKNOWN:
        return "unknown";
    }
    throw std::invalid_argument("NetType " + std::to_string(static_cast<int>(type)) + " is not one of enum values!");
}

// Human-readable name of a packet type (used in logs/debug output).
std::string VoIPController::GetPacketTypeString(PktType type)
{
    switch (type)
    {
    case PktType::INIT:
        return "init";
    case PktType::INIT_ACK:
        return "init_ack";
    case PktType::STREAM_STATE:
        return "stream_state";
    case PktType::STREAM_DATA:
        return "stream_data";
    case PktType::PING:
        return "ping";
    case PktType::PONG:
        return "pong";
    case PktType::LAN_ENDPOINT:
        return "lan_endpoint";
    case PktType::NETWORK_CHANGED:
        return "network_changed";
    case PktType::NOP:
        return "nop";
    case PktType::STREAM_EC:
        return "stream_ec";
    case PktType::UPDATE_STREAMS:
        return "update_streams";
    case PktType::STREAM_DATA_X2:
        return "stream_data_x2";
    case PktType::STREAM_DATA_X3:
        return "stream_data_x3";
    case PktType::SWITCH_PREF_RELAY:
        return "switch_pref_relay";
    case PktType::SWITCH_TO_P2P:
        return "switch_to_p2p";
    }
    return std::string("unknown " + std::to_string(static_cast<std::uint8_t>(type)));
}

// Duplicates every dual-stack relay as an IPv6-only endpoint (id XORed with
// 'IPv6' tag) once our own IPv6 address is known. Idempotent via
// m_didAddIPv6Relays.
void VoIPController::AddIPv6Relays()
{
    if (!m_myIPv6.IsEmpty() && !m_didAddIPv6Relays)
    {
        std::unordered_map<std::string, std::vector<Endpoint>> endpointsByAddress;
        for (auto& [_, endpoint] : m_endpoints)
        {
            if ((endpoint.type == Endpoint::Type::UDP_RELAY || endpoint.type == Endpoint::Type::TCP_RELAY) && !endpoint.v6address.IsEmpty() && !endpoint.address.IsEmpty())
            {
                endpointsByAddress[endpoint.v6address.ToString()].emplace_back(endpoint);
            }
        }
        MutexGuard m(m_endpointsMutex);
        for (auto& [_, endpoints] : endpointsByAddress)
        {
            for (Endpoint& endpoint : endpoints)
            {
                m_didAddIPv6Relays = true;
                endpoint.address = NetworkAddress::Empty();
                endpoint.id = endpoint.id ^ (static_cast<std::int64_t>(FOURCC('I', 'P', 'v', '6')) << 32);
                endpoint.m_averageRTT = 0;
                endpoint.m_lastPingSeq = 0;
                endpoint.m_lastPingTime = 0;
                endpoint.m_rtts.Reset();
                endpoint.m_udpPongCount = 0;
                m_endpoints[endpoint.id] = endpoint;
                LOGD("Adding IPv6-only endpoint [%s]:%u", endpoint.v6address.ToString().c_str(), endpoint.port);
            }
        }
    }
}

// Clones every UDP relay as a TCP relay (id XORed with 'TCP\0' tag) with
// fresh ping statistics. Idempotent via m_didAddTcpRelays.
void VoIPController::AddTCPRelays()
{
    if (!m_didAddTcpRelays)
    {
        bool wasSetCurrentToTCP = m_setCurrentEndpointToTCP;
        LOGV("Adding TCP relays");
        std::vector<Endpoint> relays;
        for (auto& [_, endpoint] : m_endpoints)
        {
            if (endpoint.type != Endpoint::Type::UDP_RELAY)
                continue;
            if (wasSetCurrentToTCP && !m_useUDP)
            {
                endpoint.m_rtts.Reset();
                endpoint.m_averageRTT = 0;
                endpoint.m_lastPingSeq = 0;
            }
            Endpoint tcpRelay(endpoint);
            tcpRelay.type = Endpoint::Type::TCP_RELAY;
            tcpRelay.m_averageRTT = 0;
            tcpRelay.m_lastPingSeq = 0;
            tcpRelay.m_lastPingTime = 0;
            tcpRelay.m_rtts.Reset();
            tcpRelay.m_udpPongCount = 0;
            tcpRelay.id = tcpRelay.id ^ (static_cast<std::int64_t>(FOURCC('T', 'C', 'P', 0)) << 32);
            if (m_setCurrentEndpointToTCP && m_endpoints.at(m_currentEndpoint).type != Endpoint::Type::TCP_RELAY)
            {
                LOGV("Setting current endpoint to TCP");
                m_setCurrentEndpointToTCP = false;
                m_currentEndpoint = tcpRelay.id;
                m_preferredRelay =
                    tcpRelay.id;
            }
            relays.emplace_back(tcpRelay);
        }
        MutexGuard m(m_endpointsMutex);
        for (Endpoint& e : relays)
        {
            m_endpoints[e.id] = e;
        }
        m_didAddTcpRelays = true;
    }
}

#if defined(__APPLE__)
// One-time init of the mach timebase used to convert mach_absolute_time()
// ticks into seconds (invoked via pthread_once from GetCurrentTime).
static void initMachTimestart()
{
    mach_timebase_info_data_t tb = {0, 0};
    mach_timebase_info(&tb);
    VoIPController::machTimebase = tb.numer;
    VoIPController::machTimebase /= tb.denom;
    VoIPController::machTimestart = mach_absolute_time();
}
#endif

// Monotonic clock in seconds; per-platform implementation.
double VoIPController::GetCurrentTime()
{
#if defined(__linux__)
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return ts.tv_sec + ts.tv_nsec / 1000000000.0;
#elif defined(__APPLE__)
    static pthread_once_t token = PTHREAD_ONCE_INIT;
    pthread_once(&token, &initMachTimestart);
    return (mach_absolute_time() - machTimestart) * machTimebase / 1000000000.0f;
#elif defined(_WIN32)
    if (!didInitWin32TimeScale)
    {
        LARGE_INTEGER scale;
        QueryPerformanceFrequency(&scale);
        win32TimeScale = scale.QuadPart;
        didInitWin32TimeScale = true;
    }
    LARGE_INTEGER t;
    QueryPerformanceCounter(&t);
    return (double)t.QuadPart / (double)win32TimeScale;
#endif
}

// Legacy (pre-MTProto2) key derivation: four SHA1 digests over msg key and
// fixed slices of the shared encryption key, recombined into a 32-byte AES
// key and 32-byte IV. `x` selects the outgoing (0) or incoming (8) offset.
// NOTE: the exact byte offsets are protocol-mandated — do not "clean up".
void VoIPController::KDF(std::uint8_t* msgKey, std::size_t x, std::uint8_t* aesKey, std::uint8_t* aesIv)
{
    std::uint8_t sA[SHA1_LENGTH], sB[SHA1_LENGTH], sC[SHA1_LENGTH], sD[SHA1_LENGTH];
    BufferOutputStream buf(128);
    buf.WriteBytes(msgKey, 16);
    buf.WriteBytes(m_encryptionKey + x, 32);
    crypto.sha1(buf.GetBuffer(), buf.GetLength(), sA);
    buf.Reset();
    buf.WriteBytes(m_encryptionKey + 32 + x, 16);
    buf.WriteBytes(msgKey, 16);
    buf.WriteBytes(m_encryptionKey + 48 + x, 16);
    crypto.sha1(buf.GetBuffer(), buf.GetLength(), sB);
    buf.Reset();
    buf.WriteBytes(m_encryptionKey + 64 + x, 32);
    buf.WriteBytes(msgKey, 16);
    crypto.sha1(buf.GetBuffer(), buf.GetLength(), sC);
    buf.Reset();
    buf.WriteBytes(msgKey, 16);
    buf.WriteBytes(m_encryptionKey + 96 + x, 32);
    crypto.sha1(buf.GetBuffer(), buf.GetLength(), sD);
    buf.Reset();
    buf.WriteBytes(sA, 8);
    buf.WriteBytes(sB + 8, 12);
    buf.WriteBytes(sC + 4, 12);
    assert(buf.GetLength() == 32);
    std::memcpy(aesKey, buf.GetBuffer(), 32);
    buf.Reset();
    buf.WriteBytes(sA + 8, 12);
    buf.WriteBytes(sB, 8);
    buf.WriteBytes(sC + 16, 4);
    buf.WriteBytes(sD, 8);
    assert(buf.GetLength() == 32);
    std::memcpy(aesIv, buf.GetBuffer(), 32);
}

// MTProto 2.0 key derivation: two SHA256 digests over msg key and slices of
// the shared key, recombined into AES key and IV. Offsets are
// protocol-mandated.
void VoIPController::KDF2(std::uint8_t* msgKey, std::size_t x, std::uint8_t* aesKey, std::uint8_t* aesIv)
{
    std::uint8_t sA[32], sB[32];
    BufferOutputStream buf(128);
    buf.WriteBytes(msgKey, 16);
    buf.WriteBytes(m_encryptionKey + x, 36);
    crypto.sha256(buf.GetBuffer(), buf.GetLength(), sA);
    buf.Reset();
    buf.WriteBytes(m_encryptionKey + 40 + x, 36);
    buf.WriteBytes(msgKey, 16);
    crypto.sha256(buf.GetBuffer(), buf.GetLength(), sB);
    buf.Reset();
    buf.WriteBytes(sA, 8);
    buf.WriteBytes(sB + 8, 16);
    buf.WriteBytes(sA + 24, 8);
    std::memcpy(aesKey, buf.GetBuffer(), 32);
    buf.Reset();
    buf.WriteBytes(sB, 8);
    buf.WriteBytes(sA + 8, 16);
    buf.WriteBytes(sB + 24, 8);
    std::memcpy(aesIv, buf.GetBuffer(), 32);
}

// Asks a relay for our publicly visible endpoints: 16 bytes of peer tag
// followed by 16 bytes of 0xFF.
void VoIPController::SendPublicEndpointsRequest(const Endpoint& relay)
{
    if (!m_useUDP)
        return;
    LOGD("Sending public endpoints request to %s:%d", relay.address.ToString().c_str(), relay.port);
    m_publicEndpointsReqTime = GetCurrentTime();
    m_waitingForRelayPeerInfo = true;
    Buffer buf(32);
    std::memcpy(*buf, relay.peerTag, 16);
    std::memset(*buf + 16, 0xFF, 16);
    m_udpSocket->Send(NetworkPacket {
        std::move(buf),
        relay.address,
        relay.port,
        NetworkProtocol::UDP });
}

// Returns the first endpoint of the given type (the preferred relay for
// UDP_RELAY if set); throws std::out_of_range when none exists.
Endpoint& VoIPController::GetEndpointByType(Endpoint::Type type)
{
    if (type == Endpoint::Type::UDP_RELAY && m_preferredRelay)
        return m_endpoints.at(m_preferredRelay);
    for (auto& [_, endpoint] : m_endpoints)
        if (endpoint.type == type)
            return endpoint;
    throw std::out_of_range("no endpoint");
}

// Queues a packet for retransmission every `retryInterval` seconds until
// acked or `timeout` elapses (timeout 0 means retry indefinitely).
void VoIPController::SendPacketReliably(PktType type, std::uint8_t* data, std::size_t len, double retryInterval, double timeout)
{
    ENFORCE_MSG_THREAD;
    LOGD("Send reliably, type=%u, len=%u, retry=%.3f, timeout=%.3f", static_cast<std::uint8_t>(type), unsigned(len), retryInterval, timeout);
    QueuedPacket pkt;
    if
    (data)
    {
        Buffer b(len);
        b.CopyFrom(data, 0, len);
        pkt.data = std::move(b);
    }
    pkt.type = type;
    pkt.retryInterval = retryInterval;
    pkt.timeout = timeout;
    pkt.firstSentTime = 0;
    pkt.lastSentTime = 0;
    m_queuedPackets.emplace_back(std::move(pkt));
    m_messageThread.Post(std::bind(&VoIPController::UpdateQueuedPackets, this));
    if (timeout > 0.0)
    {
        // Schedule a second pass so the packet gets expired at its deadline.
        m_messageThread.Post(std::bind(&VoIPController::UpdateQueuedPackets, this), timeout);
    }
}

// Registers (or replaces) an unacknowledged "extra" of the given type to be
// piggybacked onto outgoing packets. An existing extra of the same type is
// overwritten and its containing-seq reset.
void VoIPController::SendExtra(Buffer& data, ExtraType type)
{
    ENFORCE_MSG_THREAD;
    LOGV("Sending extra type %u length %u", static_cast<std::uint8_t>(type), static_cast<unsigned int>(data.Length()));
    for (UnacknowledgedExtraData& extraData : m_currentExtras)
    {
        if (extraData.type == type)
        {
            extraData.firstContainingSeq = 0;
            extraData.data = std::move(data);
            return;
        }
    }
    UnacknowledgedExtraData xd = { type, std::move(data), 0 };
    m_currentExtras.emplace_back(std::move(xd));
}

// Intentionally a no-op in this build.
void VoIPController::DebugCtl(int request, int param)
{
}

// Sends one raw (unencrypted framing) UDP ping to a relay with a random
// 64-bit id; the send time is recorded so the pong can be matched.
void VoIPController::SendUdpPing(Endpoint& endpoint)
{
    if (endpoint.type != Endpoint::Type::UDP_RELAY)
        return;
    BufferOutputStream p(1024);
    p.WriteBytes(endpoint.peerTag, 16);
    p.WriteInt32(-1);
    p.WriteInt32(-1);
    p.WriteInt32(-1);
    p.WriteInt32(-2);
    std::int64_t id;
    crypto.rand_bytes(reinterpret_cast<std::uint8_t*>(&id), 8);
    p.WriteInt64(id);
    endpoint.m_udpPingTimes[id] = GetCurrentTime();
    m_udpSocket->Send(NetworkPacket {
        Buffer(std::move(p)),
        endpoint.GetAddress(),
        endpoint.port,
        NetworkProtocol::UDP});
    endpoint.m_totalUdpPings++;
    LOGV("Sending UDP ping to %s:%d, id %" PRId64, endpoint.GetAddress().ToString().c_str(), endpoint.port, id);
}

// Restarts UDP connectivity probing from scratch: cancels the pending ping
// timer, clears per-endpoint pong stats and schedules SendUdpPings every
// 0.5 s.
void VoIPController::ResetUdpAvailability()
{
    ENFORCE_MSG_THREAD;
    LOGI("Resetting UDP availability");
    if (m_udpPingTimeoutID != MessageThread::INVALID_ID)
    {
        m_messageThread.Cancel(m_udpPingTimeoutID);
    }
    {
        for (std::pair<const std::int64_t, Endpoint>& e : m_endpoints)
        {
            e.second.m_udpPongCount = 0;
            e.second.m_udpPingTimes.clear();
        }
    }
    m_udpPingCount = 0;
    m_udpConnectivityState = UdpState::PING_PENDING;
    m_udpPingTimeoutID = m_messageThread.Post(std::bind(&VoIPController::SendUdpPings, this), 0.0, 0.5);
}

// Zeros RTT statistics for every known endpoint.
void VoIPController::ResetEndpointPingStats()
{
    ENFORCE_MSG_THREAD;
    for (std::pair<const std::int64_t, Endpoint>& e : m_endpoints)
    {
        e.second.m_averageRTT = 0.0;
        e.second.m_rtts.Reset();
    }
}

#pragma mark - Video

// Attaches (source != nullptr) or detaches a video source, toggling the
// outgoing video stream's enabled flag and notifying the peer.
void VoIPController::SetVideoSource(video::VideoSource* source)
{
    std::shared_ptr<Stream> stream = GetStreamByType(StreamType::VIDEO, true);
    if (stream == nullptr)
    {
        LOGE("Can't set video source when there is no outgoing video stream");
        return;
    }
    if (source != nullptr)
    {
        if (!stream->enabled)
        {
            stream->enabled = true;
            m_messageThread.Post([this, stream] { SendStreamFlags(*stream); });
        }
        if (m_videoPacketSender == nullptr)
            m_videoPacketSender = new video::VideoPacketSender(this, source, stream);
        else
            m_videoPacketSender->SetSource(source);
    }
    else
    {
        if (stream->enabled)
        {
            stream->enabled = false;
            m_messageThread.Post([this, stream] { SendStreamFlags(*stream); });
        }
        if (m_videoPacketSender != nullptr)
        {
            m_videoPacketSender->SetSource(nullptr);
        }
    }
}

void VoIPController::SetVideoRenderer(video::VideoRenderer* renderer)
{
    m_videoRenderer = renderer;
}

// Replaces the outgoing video stream's codec specific data with copies of
// the supplied buffers.
void VoIPController::SetVideoCodecSpecificData(const std::vector<Buffer>& data)
{
    m_outgoingStreams[1]->codecSpecificData.clear();
    for (const Buffer& csd : data)
    {
        m_outgoingStreams[1]->codecSpecificData.emplace_back(Buffer::CopyOf(csd));
    }
    LOGI("Set outgoing video stream CSD");
}

// Currently a stub: looks up the outgoing video stream but sends nothing.
void VoIPController::SendVideoFrame(const Buffer& frame, std::uint32_t flags, std::uint32_t rotation)
{
    std::shared_ptr<Stream> stream = GetStreamByType(StreamType::VIDEO, true);
    if (stream != nullptr)
    {
    }
}

// Feeds one reassembled incoming video frame to the renderer. Keyframes
// carry an inline header (width, height, CSD records) that is parsed first.
void VoIPController::ProcessIncomingVideoFrame(Buffer frame, std::uint32_t pts, bool keyframe, std::uint16_t rotation)
{
    if (frame.Length() == 0)
    {
        LOGE("EMPTY FRAME");
    }
    if (m_videoRenderer != nullptr)
    {
        std::shared_ptr<Stream> stream = GetStreamByType(StreamType::VIDEO, false);
        std::size_t offset = 0;
        if
        (keyframe)
        {
            BufferInputStream in(frame);
            std::uint16_t width = in.ReadUInt16();
            std::uint16_t height = in.ReadUInt16();
            std::uint8_t sizeAndFlag = in.ReadUInt8();
            int size = sizeAndFlag & 0x0F;
            bool reset = (sizeAndFlag & 0x80) == 0x80;
            // Re-read the CSD records when the peer asks for a reset or the
            // geometry changed; otherwise just skip past them.
            if (reset || !stream->csdIsValid || stream->width != width || stream->height != height)
            {
                stream->width = width;
                stream->height = height;
                stream->codecSpecificData.clear();
                for (int i = 0; i < size; ++i)
                {
                    std::size_t len = in.ReadUInt8();
                    Buffer b(len);
                    in.ReadBytes(b);
                    stream->codecSpecificData.emplace_back(std::move(b));
                }
                // Marked invalid here so the renderer is Reset() below.
                stream->csdIsValid = false;
            }
            else
            {
                for (int i = 0; i < size; i++)
                {
                    std::size_t len = in.ReadUInt8();
                    in.Seek(in.GetOffset() + len);
                }
            }
            offset = in.GetOffset();
        }
        if (!stream->csdIsValid && stream->width && stream->height)
        {
            m_videoRenderer->Reset(stream->codec, stream->width, stream->height, stream->codecSpecificData);
            stream->csdIsValid = true;
        }
        // Only decode consecutive frames (or any keyframe) to avoid feeding
        // the decoder after packet loss.
        if (m_lastReceivedVideoFrameNumber == UINT32_MAX || m_lastReceivedVideoFrameNumber == pts - 1 || keyframe)
        {
            m_lastReceivedVideoFrameNumber = pts;
            //LOGV("3 before decode %u", (unsigned int)frame.Length());
            if (stream->rotation != rotation)
            {
                stream->rotation = rotation;
                m_videoRenderer->SetRotation(rotation);
            }
            if (offset == 0)
            {
                m_videoRenderer->DecodeAndDisplay(std::move(frame), pts);
            }
            else
            {
                m_videoRenderer->DecodeAndDisplay(Buffer::CopyOf(frame, offset, frame.Length() - offset), pts);
            }
        }
        else
        {
            LOGW("Skipping non-keyframe after packet loss...");
        }
    }
}

// Creates the outgoing video stream, picking the best codec supported by
// both our encoders and the peer's decoders (HEVC > AVC > VP8).
void VoIPController::SetupOutgoingVideoStream()
{
    std::vector<std::uint32_t> myEncoders = video::VideoSource::GetAvailableEncoders();
    std::shared_ptr<Stream> vstm = std::make_shared<Stream>();
    vstm->id = 2;
    vstm->type = StreamType::VIDEO;
    if (std::find(myEncoders.begin(), myEncoders.end(), CODEC_HEVC) != myEncoders.end() && std::find(m_peerVideoDecoders.begin(), m_peerVideoDecoders.end(), CODEC_HEVC) != m_peerVideoDecoders.end())
    {
        vstm->codec = CODEC_HEVC;
    }
    else if (std::find(myEncoders.begin(), myEncoders.end(), CODEC_AVC) != myEncoders.end() && std::find(m_peerVideoDecoders.begin(), m_peerVideoDecoders.end(), CODEC_AVC) != m_peerVideoDecoders.end())
    {
        vstm->codec = CODEC_AVC;
    }
    else if (std::find(myEncoders.begin(), myEncoders.end(), CODEC_VP8) != myEncoders.end() && std::find(m_peerVideoDecoders.begin(), m_peerVideoDecoders.end(), CODEC_VP8) != m_peerVideoDecoders.end())
    {
        vstm->codec = CODEC_VP8;
    }
    else
    {
        LOGW("Can't setup outgoing video stream: no codecs in common");
        return;
    }
    vstm->enabled = false;
    m_outgoingStreams.emplace_back(vstm);
}

#pragma mark - Timer methods

// Periodic task: pings every UDP relay, and after the 4th and 10th round
// schedules EvaluateUdpPingResults one second later.
void VoIPController::SendUdpPings()
{
    LOGW("Send udp pings");
    ENFORCE_MSG_THREAD;
    for (std::pair<const std::int64_t, Endpoint>& e : m_endpoints)
    {
        if (e.second.type == Endpoint::Type::UDP_RELAY)
        {
            SendUdpPing(e.second);
        }
    }
    if (m_udpConnectivityState == UdpState::UNKNOWN || m_udpConnectivityState == UdpState::PING_PENDING)
        m_udpConnectivityState = UdpState::PING_SENT;
    m_udpPingCount++;
    if (m_udpPingCount == 4 || m_udpPingCount == 10)
    {
        m_messageThread.CancelSelf();
        m_udpPingTimeoutID = m_messageThread.Post(std::bind(&VoIPController::EvaluateUdpPingResults, this), 1.0);
    }
}

// Decides UDP vs TCP transport based on the average number of pong replies
// per relay collected by SendUdpPings.
void VoIPController::EvaluateUdpPingResults()
{
    double avgPongs = 0;
    int count = 0;
    for (auto& [_, endpoint] : m_endpoints)
    {
        if (endpoint.type == Endpoint::Type::UDP_RELAY)
        {
            if (endpoint.m_udpPongCount > 0)
            {
                avgPongs += endpoint.m_udpPongCount;
                ++count;
            }
        }
    }
    if (count > 0)
        avgPongs /= count;
    else
        avgPongs = 0.0;
    LOGI("UDP ping reply count: %.2f", avgPongs);
    if (avgPongs == 0.0 && m_proxyProtocol == Proxy::SOCKS5 && m_udpSocket != m_realUdpSocket)
    {
        LOGI("Proxy does not let UDP through, closing proxy connection and using UDP directly");
        NetworkSocket* proxySocket = m_udpSocket;
        proxySocket->Close();
        m_udpSocket = m_realUdpSocket;
        m_selectCanceller->CancelSelect();
        delete proxySocket;
        m_proxySupportsUDP = false;
        ResetUdpAvailability();
        return;
    }
    bool configUseTCP =
ServerConfig::GetSharedInstance()->GetBoolean("use_tcp", true); if (configUseTCP) { if (avgPongs == 0.0 || (m_udpConnectivityState == UdpState::BAD && avgPongs < 7.0)) { if (m_needRateFlags & NEED_RATE_FLAG_UDP_NA) m_needRate = true; m_udpConnectivityState = UdpState::NOT_AVAILABLE; m_useTCP = true; m_useUDP = avgPongs > 1.0; if (m_endpoints.at(m_currentEndpoint).type != Endpoint::Type::TCP_RELAY) m_setCurrentEndpointToTCP = true; AddTCPRelays(); m_waitingForRelayPeerInfo = false; } else if (avgPongs < 3.0) { if (m_needRateFlags & NEED_RATE_FLAG_UDP_BAD) m_needRate = true; m_udpConnectivityState = UdpState::BAD; m_useTCP = true; m_setCurrentEndpointToTCP = true; AddTCPRelays(); m_udpPingTimeoutID = m_messageThread.Post(std::bind(&VoIPController::SendUdpPings, this), 0.5, 0.5); } else { m_udpPingTimeoutID = MessageThread::INVALID_ID; m_udpConnectivityState = UdpState::AVAILABLE; } } else { m_udpPingTimeoutID = MessageThread::INVALID_ID; m_udpConnectivityState = UdpState::NOT_AVAILABLE; } } void VoIPController::SendRelayPings() { ENFORCE_MSG_THREAD; if ((m_state == State::ESTABLISHED || m_state == State::RECONNECTING) && m_endpoints.size() > 1) { Endpoint* _preferredRelay = &m_endpoints.at(m_preferredRelay); Endpoint* _currentEndpoint = &m_endpoints.at(m_currentEndpoint); Endpoint* minPingRelay = _preferredRelay; double minPing = _preferredRelay->m_averageRTT * (_preferredRelay->type == Endpoint::Type::TCP_RELAY ? 
2 : 1); if (minPing == 0.0) // force the switch to an available relay, if any minPing = std::numeric_limits<double>::max(); for (std::pair<const std::int64_t, Endpoint>& _endpoint : m_endpoints) { Endpoint& endpoint = _endpoint.second; if (endpoint.type == Endpoint::Type::TCP_RELAY && !m_useTCP) continue; if (endpoint.type == Endpoint::Type::UDP_RELAY && !m_useUDP) continue; if (GetCurrentTime() - endpoint.m_lastPingTime >= 10) { LOGV("Sending ping to %s", endpoint.GetAddress().ToString().c_str()); SendOrEnqueuePacket(PendingOutgoingPacket { /*.seq=*/ (endpoint.m_lastPingSeq = GenerateOutSeq()), /*.type=*/ PktType::PING, /*.len=*/ 0, /*.data=*/ Buffer(), /*.endpoint=*/endpoint.id }); endpoint.m_lastPingTime = GetCurrentTime(); } if ((m_useUDP && endpoint.type == Endpoint::Type::UDP_RELAY) || (m_useTCP && endpoint.type == Endpoint::Type::TCP_RELAY)) { double k = endpoint.type == Endpoint::Type::UDP_RELAY ? 1 : 2; if (endpoint.m_averageRTT > 0 && endpoint.m_averageRTT * k < minPing * m_relaySwitchThreshold) { minPing = endpoint.m_averageRTT * k; minPingRelay = &endpoint; } } } if (minPingRelay->id != m_preferredRelay) { m_preferredRelay = minPingRelay->id; _preferredRelay = minPingRelay; LOGV("set preferred relay to %s", _preferredRelay->address.ToString().c_str()); if (_currentEndpoint->type == Endpoint::Type::UDP_RELAY || _currentEndpoint->type == Endpoint::Type::TCP_RELAY) { m_currentEndpoint = m_preferredRelay; _currentEndpoint = _preferredRelay; } } if (_currentEndpoint->type == Endpoint::Type::UDP_RELAY && m_useUDP) { constexpr std::int64_t p2pID = static_cast<std::int64_t>(FOURCC('P', '2', 'P', '4')) << 32; constexpr std::int64_t lanID = static_cast<std::int64_t>(FOURCC('L', 'A', 'N', '4')) << 32; if (m_endpoints.find(p2pID) != m_endpoints.end()) { Endpoint& p2p = m_endpoints[p2pID]; if (m_endpoints.find(lanID) != m_endpoints.end() && m_endpoints[lanID].m_averageRTT > 0 && m_endpoints[lanID].m_averageRTT < minPing * m_relayToP2pSwitchThreshold) { 
m_currentEndpoint = lanID; LOGI("Switching to p2p (LAN)"); } else { if (p2p.m_averageRTT > 0 && p2p.m_averageRTT < minPing * m_relayToP2pSwitchThreshold) { m_currentEndpoint = p2pID; LOGI("Switching to p2p (Inet)"); } } } } else { if (minPing > 0 && minPing < _currentEndpoint->m_averageRTT * m_p2pToRelaySwitchThreshold) { LOGI("Switching to relay"); m_currentEndpoint = m_preferredRelay; } } } } void VoIPController::UpdateRTT() { m_RTTHistory.Add(GetAverageRTT()); m_waitingForAcks = (m_RTTHistory[0] > 10.0 && m_RTTHistory[8] > 10.0 && (m_networkType == NetType::EDGE || m_networkType == NetType::GPRS)); for (const std::shared_ptr<Stream>& stream : m_incomingStreams) { if (stream->jitterBuffer != nullptr) { int lostCount = stream->jitterBuffer->GetAndResetLostPacketCount(); if (lostCount > 0 || (lostCount < 0 && m_recvLossCount > static_cast<std::uint32_t>(-lostCount))) m_recvLossCount += static_cast<std::uint32_t>(lostCount); } } } void VoIPController::UpdateCongestion() { if (m_congestionControl == nullptr || m_encoder == nullptr) return; std::uint32_t sendLossCount = m_congestionControl->GetSendLossCount(); m_sendLossCountHistory.Add(sendLossCount - m_prevSendLossCount); m_prevSendLossCount = sendLossCount; double packetsPerSec = 1000.0 / m_outgoingStreams[0]->frameDuration; double avgSendLossCount = m_sendLossCountHistory.Average() / packetsPerSec; if (avgSendLossCount > m_packetLossToEnableExtraEC && m_networkType != NetType::GPRS && m_networkType != NetType::EDGE) { if (!m_shittyInternetMode) { // Shitty Internet Mode™. Redundant redundancy you can trust. 
m_shittyInternetMode = true;
// Enable extra EC on the first outgoing audio stream and tell the peer
// about the changed stream flags.
for (std::shared_ptr<Stream>& s : m_outgoingStreams)
{
    if (s->type == StreamType::AUDIO)
    {
        s->extraECEnabled = true;
        SendStreamFlags(*s);
        break;
    }
}
m_encoder->SetSecondaryEncoderEnabled(true);
LOGW("Enabling extra EC");
if (m_needRateFlags & NEED_RATE_FLAG_SHITTY_INTERNET_MODE)
    m_needRate = true;
m_wasExtraEC = true;
}
}
// Map the average loss fraction to a discrete extra-EC level.
// NOTE(review): levels are 0/2/3/4 — there is no level 1; confirm intentional.
if (avgSendLossCount > 0.08)
    m_extraEcLevel = 4;
else if (avgSendLossCount > 0.05)
    m_extraEcLevel = 3;
else if (avgSendLossCount > 0.02)
    m_extraEcLevel = 2;
else
    m_extraEcLevel = 0;
// Feed the loss percentage to the codec (e.g. for FEC tuning inside it).
m_encoder->SetPacketLoss(static_cast<int>(avgSendLossCount * 100));
if (avgSendLossCount > m_rateMaxAcceptableSendLoss)
    m_needRate = true;
// Leave extra-EC mode once loss drops below the enable threshold, or when the
// network degrades to EDGE/GPRS where the redundant data is not affordable.
if ((avgSendLossCount < m_packetLossToEnableExtraEC || m_networkType == NetType::EDGE || m_networkType == NetType::GPRS) && m_shittyInternetMode)
{
    m_shittyInternetMode = false;
    for (std::shared_ptr<Stream>& s : m_outgoingStreams)
    {
        if (s->type == StreamType::AUDIO)
        {
            s->extraECEnabled = false;
            SendStreamFlags(*s);
            break;
        }
    }
    m_encoder->SetSecondaryEncoderEnabled(false);
    LOGW("Disabling extra EC");
}
// One-way latch of the "encoder was laggy" flag.
// NOTE(review): the trigger is "complexity < 10" — i.e. the encoder has
// already been down-tuned; verify this is the intended meaning of "laggy".
if (!m_wasEncoderLaggy && m_encoder->GetComplexity() < 10)
    m_wasEncoderLaggy = true;
}

// Adjusts the audio encoder bitrate according to the congestion controller's
// bandwidth decision, and handles receive-timeout state transitions.
// (Body continues on the next chunks.)
void VoIPController::UpdateAudioBitrate()
{
    if (m_congestionControl == nullptr || m_encoder == nullptr)
        return;
    double time = GetCurrentTime();
    // Fail the call outright if either audio device reports it failed to
    // initialize.
    if ((m_audioInput != nullptr && !m_audioInput->IsInitialized()) || (m_audioOutput != nullptr && !m_audioOutput->IsInitialized()))
    {
        LOGE("Audio I/O failed");
        m_lastError = Error::AUDIO_IO;
        SetState(State::FAILED);
    }
    tgvoip::ConctlAct act = m_congestionControl->GetBandwidthControlAction();
    if (m_shittyInternetMode)
    {
        // Extra-EC mode: pin the codec at 8 kbit/s so the redundancy fits.
        m_encoder->SetBitrate(8000);
    }
    else if (act == tgvoip::ConctlAct::DECREASE)
    {
        std::uint32_t bitrate = m_encoder->GetBitrate();
        // Step the bitrate down, clamping at m_minAudioBitrate, and never
        // going below the 8000 floor.
        if (bitrate > 8000)
            m_encoder->SetBitrate(bitrate < (m_minAudioBitrate + m_audioBitrateStepDecr) ?
m_minAudioBitrate : (bitrate - m_audioBitrateStepDecr));
}
else if (act == tgvoip::ConctlAct::INCREASE)
{
    std::uint32_t bitrate = m_encoder->GetBitrate();
    // Step the bitrate up until the configured maximum.
    if (bitrate < m_maxBitrate)
        m_encoder->SetBitrate(bitrate + m_audioBitrateStepIncr);
}
// No packets received for m_reconnectingTimeout while established: enter
// RECONNECTING and re-probe UDP availability.
if (m_state == State::ESTABLISHED && time - m_lastRecvPacketTime >= m_reconnectingTimeout)
{
    SetState(State::RECONNECTING);
    if (m_needRateFlags & NEED_RATE_FLAG_RECONNECTING)
        m_needRate = true;
    m_wasReconnecting = true;
    ResetUdpAvailability();
}
if (m_state == State::ESTABLISHED || m_state == State::RECONNECTING)
{
    // Hard receive timeout.
    if (time - m_lastRecvPacketTime >= m_config.recvTimeout)
    {
        const Endpoint& _currentEndpoint = m_endpoints.at(m_currentEndpoint);
        if (_currentEndpoint.type != Endpoint::Type::UDP_RELAY && _currentEndpoint.type != Endpoint::Type::TCP_RELAY)
        {
            // A p2p path timed out: fall back to the preferred relay and wipe
            // RTT statistics of all p2p endpoints so they must be re-measured
            // before being selected again.
            LOGW("Packet receive timeout, switching to relay");
            m_currentEndpoint = m_preferredRelay;
            for (auto& [_, endpoint] : m_endpoints)
            {
                if (endpoint.type == Endpoint::Type::UDP_P2P_INET || endpoint.type == Endpoint::Type::UDP_P2P_LAN)
                {
                    endpoint.m_averageRTT = 0;
                    endpoint.m_rtts.Reset();
                }
            }
            if (m_allowP2p)
            {
                SendPublicEndpointsRequest();
            }
            UpdateDataSavingState();
            UpdateAudioBitrateLimit();
            // Notify the peer that the network changed (4-byte flags payload).
            BufferOutputStream s(4);
            s.WriteInt32(m_dataSavingMode ?
INIT_FLAG_DATA_SAVING_ENABLED : 0);
if (m_peerVersion < 6)
{
    // Legacy peers (< protocol 6) get a dedicated reliable packet...
    SendPacketReliably(PktType::NETWORK_CHANGED, s.GetBuffer(), s.GetLength(), 1, 20);
}
else
{
    // ...newer peers receive it as an "extra" piggybacked on outgoing packets.
    Buffer buf(std::move(s));
    SendExtra(buf, ExtraType::NETWORK_CHANGED);
}
m_lastRecvPacketTime = time;
}
else
{
    // Already on a relay and still timing out: give up on the call.
    LOGW("Packet receive timeout, disconnecting");
    m_lastError = Error::TIMEOUT;
    SetState(State::FAILED);
}
}
}
}

// Recomputes the 1..4 signal-bar estimate from connection state, outgoing
// loss rate and jitter-buffer lateness, smooths it through
// m_signalBarsHistory, and fires the callback when the smoothed value changes.
void VoIPController::UpdateSignalBars()
{
    int prevSignalBarCount = GetSignalBarsCount();
    double packetsPerSec = 1000.0 / m_outgoingStreams[0]->frameDuration;
    double avgSendLossCount = m_sendLossCountHistory.Average() / packetsPerSec;
    int signalBarCount = 4;
    if (m_state == State::RECONNECTING || m_waitingForAcks)
        signalBarCount = 1;
    // A TCP relay implies a degraded path: cap at 3 bars.
    if (m_endpoints.at(m_currentEndpoint).type == Endpoint::Type::TCP_RELAY)
    {
        signalBarCount = std::min(signalBarCount, 3);
    }
    // Cap further by the outgoing loss fraction.
    if (avgSendLossCount > 0.1)
    {
        signalBarCount = 1;
    }
    else if (avgSendLossCount > 0.0625)
    {
        signalBarCount = std::min(signalBarCount, 2);
    }
    else if (avgSendLossCount > 0.025)
    {
        signalBarCount = std::min(signalBarCount, 3);
    }
    // Cap by incoming lateness; only slot [2] of the 3-slot output is used —
    // its exact window semantics live in JitterBuffer::GetAverageLateCount.
    for (std::shared_ptr<Stream>& stream : m_incomingStreams)
    {
        if (stream->jitterBuffer != nullptr)
        {
            double avgLateCount[3];
            stream->jitterBuffer->GetAverageLateCount(avgLateCount);
            if (avgLateCount[2] >= 0.2)
                signalBarCount = 1;
            else if (avgLateCount[2] >= 0.1)
                signalBarCount = std::min(signalBarCount, 2);
        }
    }
    m_signalBarsHistory.Add(static_cast<unsigned char>(signalBarCount));
    int _signalBarCount = GetSignalBarsCount();
    if (_signalBarCount != prevSignalBarCount)
    {
        LOGD("SIGNAL BAR COUNT CHANGED: %d", _signalBarCount);
        if (m_callbacks.signalBarCountChanged)
            m_callbacks.signalBarCountChanged(this, _signalBarCount);
    }
}

// Walks the reliable-delivery queue: drops packets whose overall timeout has
// expired and retransmits those whose retry interval elapsed.
// (Body continues on the next chunk.)
void VoIPController::UpdateQueuedPackets()
{
    std::vector<PendingOutgoingPacket> packetsToSend;
    for (auto qp = m_queuedPackets.begin(); qp != m_queuedPackets.end();)
    {
        if (qp->timeout > 0 && qp->firstSentTime > 0 && GetCurrentTime() - qp->firstSentTime >= qp->timeout)
        {
            LOGD("Removing queued packet because of timeout");
qp = m_queuedPackets.erase(qp);
continue;
}
if (GetCurrentTime() - qp->lastSentTime >= qp->retryInterval)
{
    // Schedule another pass after this packet's retry interval.
    // NOTE(review): this posts one follow-up per due packet per pass, so
    // several due packets enqueue duplicate timer posts — confirm harmless.
    m_messageThread.Post(std::bind(&VoIPController::UpdateQueuedPackets, this), qp->retryInterval);
    std::uint32_t seq = GenerateOutSeq();
    // Each retransmission gets a fresh sequence number; remember them all so
    // an ack for any of them completes the queued packet.
    qp->seqs.Add(seq);
    qp->lastSentTime = GetCurrentTime();
    Buffer buf(qp->data.Length());
    if (qp->firstSentTime == 0)
        qp->firstSentTime = qp->lastSentTime;
    if (qp->data.Length())
        buf.CopyFrom(qp->data, qp->data.Length());
    packetsToSend.emplace_back(PendingOutgoingPacket {
        /*.seq=*/ seq,
        /*.type=*/ qp->type,
        /*.len=*/ qp->data.Length(),
        /*.data=*/ std::move(buf),
        /*.endpoint=*/0
    });
}
++qp;
}
// Sending is deferred so SendOrEnqueuePacket runs outside the loop that
// iterates (and erases from) m_queuedPackets.
for (PendingOutgoingPacket& pkt : packetsToSend)
{
    SendOrEnqueuePacket(std::move(pkt));
}
}

// Sends an empty keep-alive (NOP) packet while the call is established.
void VoIPController::SendNopPacket()
{
    if (m_state != State::ESTABLISHED)
        return;
    SendOrEnqueuePacket(PendingOutgoingPacket {
        // NOTE(review): the fresh seq is stored into m_firstSentPing — confirm
        // that member is really meant to be updated by NOP packets.
        /*.seq=*/ (m_firstSentPing = GenerateOutSeq()),
        /*.type=*/ PktType::NOP,
        /*.len=*/ 0,
        /*.data=*/ Buffer(),
        /*.endpoint=*/0
    });
}

// Asks every non-IPv6-only UDP relay for our public endpoint info (needed to
// establish p2p), and reschedules itself up to 10 times while the relay peer
// info has not arrived. Must run on the message thread.
void VoIPController::SendPublicEndpointsRequest()
{
    ENFORCE_MSG_THREAD;
    if (!m_allowP2p)
        return;
    LOGI("Sending public endpoints request");
    for (std::pair<const std::int64_t, Endpoint>& e : m_endpoints)
    {
        if (e.second.type == Endpoint::Type::UDP_RELAY && !e.second.IsIPv6Only())
        {
            SendPublicEndpointsRequest(e.second);
        }
    }
    ++m_publicEndpointsReqCount;
    if (m_publicEndpointsReqCount < 10)
    {
        // Retry in 5 s if the relay peer info has not arrived by then.
        m_messageThread.Post([this] {
            if (m_waitingForRelayPeerInfo)
            {
                LOGW("Resending peer relay info request");
                SendPublicEndpointsRequest();
            }
        }, 5.0);
    }
    else
    {
        m_publicEndpointsReqCount = 0;
    }
}

// Periodic driver: ticks each incoming stream's jitter buffer and the
// congestion controller, then detects lost outgoing packets.
// (Body continues on the next chunk.)
void VoIPController::TickJitterBufferAndCongestionControl()
{
    // TODO get rid of this and update states of these things internally and retroactively
    for (std::shared_ptr<Stream>& stream : m_incomingStreams)
    {
        if (stream->jitterBuffer != nullptr)
        {
            stream->jitterBuffer->Tick();
        }
    }
    if (m_congestionControl != nullptr)
    {
        m_congestionControl->Tick();
    }
    double currentTime = GetCurrentTime();
    double rtt = GetAverageRTT();
    double
packetLossTimeout = std::max(rtt * 2.0, 0.1);
// Mark still-unacked packets older than ~2xRTT (but at least 100 ms) as lost
// and notify the responsible sender, or the congestion controller for stream
// data that has no dedicated sender.
for (RecentOutgoingPacket& pkt : m_recentOutgoingPackets)
{
    if (pkt.ackTime != 0.0 || pkt.lost)
        continue; // already acknowledged or already counted as lost
    if (currentTime - pkt.sendTime > packetLossTimeout)
    {
        pkt.lost = true;
        ++m_sendLosses;
        LOGW("Outgoing packet lost: seq=%u, type=%s, size=%u", pkt.seq, GetPacketTypeString(pkt.type).c_str(), static_cast<unsigned int>(pkt.size));
        if (pkt.sender)
        {
            pkt.sender->PacketLost(pkt.seq, pkt.type, pkt.size);
        }
        else if (pkt.type == PktType::STREAM_DATA)
        {
            m_congestionControl->PacketLost(pkt.seq);
        }
    }
}
}
// NOTE(review): everything below is an autogenerated il2cpp binding header
// (CppHeaderCreator) for the Unity/.NET type System.Net.Http.HttpClient.
// Do not hand-edit declarations or offsets — regenerate from metadata instead.
// Autogenerated from CppHeaderCreator
// Created by Sc2ad
// =========================================================================
#pragma once
// Begin includes
#include "extern/beatsaber-hook/shared/utils/typedefs.h"
#include "extern/beatsaber-hook/shared/utils/byref.hpp"
// Including type: System.Net.Http.HttpMessageInvoker
#include "System/Net/Http/HttpMessageInvoker.hpp"
// Including type: System.TimeSpan
#include "System/TimeSpan.hpp"
#include "extern/beatsaber-hook/shared/utils/il2cpp-utils-methods.hpp"
#include "extern/beatsaber-hook/shared/utils/il2cpp-utils-properties.hpp"
#include "extern/beatsaber-hook/shared/utils/il2cpp-utils-fields.hpp"
#include "extern/beatsaber-hook/shared/utils/utils.h"
// Completed includes
// Begin forward declares
// Forward declaring namespace: System::Net::Http
namespace System::Net::Http {
    // Forward declaring type: HttpResponseMessage
    class HttpResponseMessage;
    // Forward declaring type: HttpCompletionOption
    struct HttpCompletionOption;
    // Forward declaring type: HttpRequestMessage
    class HttpRequestMessage;
    // Forward declaring type: HttpMessageHandler
    class HttpMessageHandler;
}
// Forward declaring namespace: System
namespace System {
    // Forward declaring type: Uri
    class Uri;
}
// Forward declaring namespace: System::Threading
namespace System::Threading {
    // Forward declaring type: CancellationTokenSource
    class CancellationTokenSource;
    // Forward declaring type: CancellationToken
    struct CancellationToken;
}
// Forward declaring namespace: System::Net::Http::Headers
namespace System::Net::Http::Headers {
    // Forward declaring type: HttpRequestHeaders
    class HttpRequestHeaders;
}
// Forward declaring namespace: System::Threading::Tasks
namespace System::Threading::Tasks {
    // Forward declaring type: Task`1<TResult>
    template<typename TResult>
    class Task_1;
}
// Completed forward declares
// Type namespace: System.Net.Http
namespace System::Net::Http {
    // Size: 0x50
    // pack(1): the layout below mirrors the il2cpp runtime object exactly;
    // padding is emitted explicitly as char arrays.
    #pragma pack(push, 1)
    // Autogenerated type: System.Net.Http.HttpClient
    // [TokenAttribute] Offset: FFFFFFFF
    class HttpClient : public System::Net::Http::HttpMessageInvoker {
        public:
        // Writing base type padding for base size: 0x19 to desired offset: 0x20
        char ___base_padding[0x7] = {};
        // Nested type: System::Net::Http::HttpClient::$SendAsyncWorker$d__47
        struct $SendAsyncWorker$d__47;
        // Nested type: System::Net::Http::HttpClient::$GetStringAsync$d__52
        struct $GetStringAsync$d__52;
        // private System.Uri base_address
        // Size: 0x8
        // Offset: 0x20
        System::Uri* base_address;
        // Field size check
        static_assert(sizeof(System::Uri*) == 0x8);
        // private System.Threading.CancellationTokenSource cts
        // Size: 0x8
        // Offset: 0x28
        System::Threading::CancellationTokenSource* cts;
        // Field size check
        static_assert(sizeof(System::Threading::CancellationTokenSource*) == 0x8);
        // private System.Boolean disposed
        // Size: 0x1
        // Offset: 0x30
        bool disposed;
        // Field size check
        static_assert(sizeof(bool) == 0x1);
        // Padding between fields: disposed and: headers
        char __padding2[0x7] = {};
        // private System.Net.Http.Headers.HttpRequestHeaders headers
        // Size: 0x8
        // Offset: 0x38
        System::Net::Http::Headers::HttpRequestHeaders* headers;
        // Field size check
        static_assert(sizeof(System::Net::Http::Headers::HttpRequestHeaders*) == 0x8);
        // private System.Int64 buffer_size
        // Size: 0x8
        // Offset: 0x40
        int64_t buffer_size;
        // Field size check
        static_assert(sizeof(int64_t) == 0x8);
        // private System.TimeSpan timeout
        // Size: 0x8
        // Offset: 0x48
        System::TimeSpan timeout;
        // Field size check
        static_assert(sizeof(System::TimeSpan) == 0x8);
        // Creating value type constructor for type: HttpClient
        HttpClient(System::Uri* base_address_ = {}, System::Threading::CancellationTokenSource* cts_ = {}, bool disposed_ = {}, System::Net::Http::Headers::HttpRequestHeaders* headers_ = {}, int64_t buffer_size_ = {}, System::TimeSpan timeout_ = {}) noexcept : base_address{base_address_}, cts{cts_}, disposed{disposed_}, headers{headers_}, buffer_size{buffer_size_}, timeout{timeout_} {}
        // Get static field: static private readonly System.TimeSpan TimeoutDefault
        static System::TimeSpan _get_TimeoutDefault();
        // Set static field: static private readonly System.TimeSpan TimeoutDefault
        static void _set_TimeoutDefault(System::TimeSpan value);
        // Get instance field: private System.Uri base_address
        System::Uri* _get_base_address();
        // Set instance field: private System.Uri base_address
        void _set_base_address(System::Uri* value);
        // Get instance field: private System.Threading.CancellationTokenSource cts
        System::Threading::CancellationTokenSource* _get_cts();
        // Set instance field: private System.Threading.CancellationTokenSource cts
        void _set_cts(System::Threading::CancellationTokenSource* value);
        // Get instance field: private System.Boolean disposed
        bool _get_disposed();
        // Set instance field: private System.Boolean disposed
        void _set_disposed(bool value);
        // Get instance field: private System.Net.Http.Headers.HttpRequestHeaders headers
        System::Net::Http::Headers::HttpRequestHeaders* _get_headers();
        // Set instance field: private System.Net.Http.Headers.HttpRequestHeaders headers
        void _set_headers(System::Net::Http::Headers::HttpRequestHeaders* value);
        // Get instance field: private System.Int64 buffer_size
        int64_t _get_buffer_size();
        // Set instance field: private System.Int64 buffer_size
        void _set_buffer_size(int64_t value);
        // Get instance field: private System.TimeSpan timeout
        System::TimeSpan _get_timeout();
        // Set instance field: private System.TimeSpan timeout
        void _set_timeout(System::TimeSpan value);
        // public System.Int64 get_MaxResponseContentBufferSize()
        // Offset: 0x12737E8
        int64_t get_MaxResponseContentBufferSize();
        // public System.Void set_Timeout(System.TimeSpan value)
        // Offset: 0x12737F0
        void set_Timeout(System::TimeSpan value);
        // static private System.Void .cctor()
        // Offset: 0x1274230
        static void _cctor();
        // public System.Threading.Tasks.Task`1<System.Net.Http.HttpResponseMessage> GetAsync(System.String requestUri, System.Net.Http.HttpCompletionOption completionOption)
        // Offset: 0x12739E0
        System::Threading::Tasks::Task_1<System::Net::Http::HttpResponseMessage*>* GetAsync(::Il2CppString* requestUri, System::Net::Http::HttpCompletionOption completionOption);
        // public System.Threading.Tasks.Task`1<System.Net.Http.HttpResponseMessage> SendAsync(System.Net.Http.HttpRequestMessage request, System.Net.Http.HttpCompletionOption completionOption)
        // Offset: 0x1273B68
        System::Threading::Tasks::Task_1<System::Net::Http::HttpResponseMessage*>* SendAsync(System::Net::Http::HttpRequestMessage* request, System::Net::Http::HttpCompletionOption completionOption);
        // public System.Threading.Tasks.Task`1<System.Net.Http.HttpResponseMessage> SendAsync(System.Net.Http.HttpRequestMessage request, System.Net.Http.HttpCompletionOption completionOption, System.Threading.CancellationToken cancellationToken)
        // Offset: 0x1273BF0
        System::Threading::Tasks::Task_1<System::Net::Http::HttpResponseMessage*>* SendAsync(System::Net::Http::HttpRequestMessage* request, System::Net::Http::HttpCompletionOption completionOption, System::Threading::CancellationToken cancellationToken);
        // private System.Threading.Tasks.Task`1<System.Net.Http.HttpResponseMessage> SendAsyncWorker(System.Net.Http.HttpRequestMessage request, System.Net.Http.HttpCompletionOption completionOption, System.Threading.CancellationToken cancellationToken)
        // Offset: 0x1273FEC
        System::Threading::Tasks::Task_1<System::Net::Http::HttpResponseMessage*>* SendAsyncWorker(System::Net::Http::HttpRequestMessage* request, System::Net::Http::HttpCompletionOption completionOption, System::Threading::CancellationToken cancellationToken);
        // public System.Threading.Tasks.Task`1<System.String> GetStringAsync(System.String requestUri)
        // Offset: 0x1274120
        System::Threading::Tasks::Task_1<::Il2CppString*>* GetStringAsync(::Il2CppString* requestUri);
        // private System.Threading.Tasks.Task`1<System.Net.Http.HttpResponseMessage> <>n__0(System.Net.Http.HttpRequestMessage request, System.Threading.CancellationToken cancellationToken)
        // Offset: 0x12742B0
        System::Threading::Tasks::Task_1<System::Net::Http::HttpResponseMessage*>* $$n__0(System::Net::Http::HttpRequestMessage* request, System::Threading::CancellationToken cancellationToken);
        // public System.Void .ctor()
        // Offset: 0x127355C
        // Implemented from: System.Object
        // Base method: System.Void Object::.ctor()
        template<::il2cpp_utils::CreationType creationType = ::il2cpp_utils::CreationType::Temporary>
        static HttpClient* New_ctor() {
            static auto ___internal__logger = ::Logger::get().WithContext("System::Net::Http::HttpClient::.ctor");
            return THROW_UNLESS((::il2cpp_utils::New<HttpClient*, creationType>()));
        }
        // public System.Void .ctor(System.Net.Http.HttpMessageHandler handler, System.Boolean disposeHandler)
        // Offset: 0x1273688
        // Implemented from: System.Net.Http.HttpMessageInvoker
        // Base method: System.Void HttpMessageInvoker::.ctor(System.Net.Http.HttpMessageHandler handler, System.Boolean disposeHandler)
        template<::il2cpp_utils::CreationType creationType = ::il2cpp_utils::CreationType::Temporary>
        static HttpClient* New_ctor(System::Net::Http::HttpMessageHandler* handler, bool disposeHandler) {
            static auto ___internal__logger = ::Logger::get().WithContext("System::Net::Http::HttpClient::.ctor");
            return THROW_UNLESS((::il2cpp_utils::New<HttpClient*, creationType>(handler, disposeHandler)));
        }
        // protected override System.Void Dispose(System.Boolean disposing)
        // Offset: 0x1273934
        // Implemented from: System.Net.Http.HttpMessageInvoker
        // Base method: System.Void HttpMessageInvoker::Dispose(System.Boolean disposing)
        void Dispose(bool disposing);
        // public override System.Threading.Tasks.Task`1<System.Net.Http.HttpResponseMessage> SendAsync(System.Net.Http.HttpRequestMessage request, System.Threading.CancellationToken cancellationToken)
        // Offset: 0x1273E74
        // Implemented from: System.Net.Http.HttpMessageInvoker
        // Base method: System.Threading.Tasks.Task`1<System.Net.Http.HttpResponseMessage> HttpMessageInvoker::SendAsync(System.Net.Http.HttpRequestMessage request, System.Threading.CancellationToken cancellationToken)
        System::Threading::Tasks::Task_1<System::Net::Http::HttpResponseMessage*>* SendAsync(System::Net::Http::HttpRequestMessage* request, System::Threading::CancellationToken cancellationToken);
    }; // System.Net.Http.HttpClient
    #pragma pack(pop)
    static check_size<sizeof(HttpClient), 72 + sizeof(System::TimeSpan)> __System_Net_Http_HttpClientSizeCheck;
    static_assert(sizeof(HttpClient) == 0x50);
}
DEFINE_IL2CPP_ARG_TYPE(System::Net::Http::HttpClient*, "System.Net.Http", "HttpClient");
#include "extern/beatsaber-hook/shared/utils/il2cpp-utils-methods.hpp"
// --- MetadataGetter specializations: map each bound method pointer back to
// --- its il2cpp MethodInfo via FindMethod. Also autogenerated.
// Writing MetadataGetter for method: System::Net::Http::HttpClient::get_MaxResponseContentBufferSize
// Il2CppName: get_MaxResponseContentBufferSize
template<>
struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<int64_t (System::Net::Http::HttpClient::*)()>(&System::Net::Http::HttpClient::get_MaxResponseContentBufferSize)> {
    static const MethodInfo* get() {
        return ::il2cpp_utils::FindMethod(classof(System::Net::Http::HttpClient*), "get_MaxResponseContentBufferSize", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{});
    }
};
// Writing MetadataGetter for method: System::Net::Http::HttpClient::set_Timeout
// Il2CppName: set_Timeout
template<>
struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<void (System::Net::Http::HttpClient::*)(System::TimeSpan)>(&System::Net::Http::HttpClient::set_Timeout)> {
    static const MethodInfo* get() {
        static auto* value = &::il2cpp_utils::GetClassFromName("System", "TimeSpan")->byval_arg;
        return ::il2cpp_utils::FindMethod(classof(System::Net::Http::HttpClient*), "set_Timeout", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{value});
    }
};
// Writing MetadataGetter for method: System::Net::Http::HttpClient::_cctor
// Il2CppName: .cctor
template<>
struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<void (*)()>(&System::Net::Http::HttpClient::_cctor)> {
    static const MethodInfo* get() {
        return ::il2cpp_utils::FindMethod(classof(System::Net::Http::HttpClient*), ".cctor", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{});
    }
};
// Writing MetadataGetter for method: System::Net::Http::HttpClient::GetAsync
// Il2CppName: GetAsync
template<>
struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<System::Threading::Tasks::Task_1<System::Net::Http::HttpResponseMessage*>* (System::Net::Http::HttpClient::*)(::Il2CppString*, System::Net::Http::HttpCompletionOption)>(&System::Net::Http::HttpClient::GetAsync)> {
    static const MethodInfo* get() {
        static auto* requestUri = &::il2cpp_utils::GetClassFromName("System", "String")->byval_arg;
        static auto* completionOption = &::il2cpp_utils::GetClassFromName("System.Net.Http", "HttpCompletionOption")->byval_arg;
        return ::il2cpp_utils::FindMethod(classof(System::Net::Http::HttpClient*), "GetAsync", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{requestUri, completionOption});
    }
};
// Writing MetadataGetter for method: System::Net::Http::HttpClient::SendAsync
// Il2CppName: SendAsync
template<>
struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<System::Threading::Tasks::Task_1<System::Net::Http::HttpResponseMessage*>* (System::Net::Http::HttpClient::*)(System::Net::Http::HttpRequestMessage*, System::Net::Http::HttpCompletionOption)>(&System::Net::Http::HttpClient::SendAsync)> {
    static const MethodInfo* get() {
        static auto* request = &::il2cpp_utils::GetClassFromName("System.Net.Http", "HttpRequestMessage")->byval_arg;
        static auto* completionOption = &::il2cpp_utils::GetClassFromName("System.Net.Http", "HttpCompletionOption")->byval_arg;
        return ::il2cpp_utils::FindMethod(classof(System::Net::Http::HttpClient*), "SendAsync", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{request, completionOption});
    }
};
// Writing MetadataGetter for method: System::Net::Http::HttpClient::SendAsync
// Il2CppName: SendAsync
template<>
struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<System::Threading::Tasks::Task_1<System::Net::Http::HttpResponseMessage*>* (System::Net::Http::HttpClient::*)(System::Net::Http::HttpRequestMessage*, System::Net::Http::HttpCompletionOption, System::Threading::CancellationToken)>(&System::Net::Http::HttpClient::SendAsync)> {
    static const MethodInfo* get() {
        static auto* request = &::il2cpp_utils::GetClassFromName("System.Net.Http", "HttpRequestMessage")->byval_arg;
        static auto* completionOption = &::il2cpp_utils::GetClassFromName("System.Net.Http", "HttpCompletionOption")->byval_arg;
        static auto* cancellationToken = &::il2cpp_utils::GetClassFromName("System.Threading", "CancellationToken")->byval_arg;
        return ::il2cpp_utils::FindMethod(classof(System::Net::Http::HttpClient*), "SendAsync", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{request, completionOption, cancellationToken});
    }
};
// Writing MetadataGetter for method: System::Net::Http::HttpClient::SendAsyncWorker
// Il2CppName: SendAsyncWorker
template<>
struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<System::Threading::Tasks::Task_1<System::Net::Http::HttpResponseMessage*>* (System::Net::Http::HttpClient::*)(System::Net::Http::HttpRequestMessage*, System::Net::Http::HttpCompletionOption, System::Threading::CancellationToken)>(&System::Net::Http::HttpClient::SendAsyncWorker)> {
    static const MethodInfo* get() {
        static auto* request = &::il2cpp_utils::GetClassFromName("System.Net.Http", "HttpRequestMessage")->byval_arg;
        static auto* completionOption = &::il2cpp_utils::GetClassFromName("System.Net.Http", "HttpCompletionOption")->byval_arg;
        static auto* cancellationToken = &::il2cpp_utils::GetClassFromName("System.Threading", "CancellationToken")->byval_arg;
        return ::il2cpp_utils::FindMethod(classof(System::Net::Http::HttpClient*), "SendAsyncWorker", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{request, completionOption, cancellationToken});
    }
};
// Writing MetadataGetter for method: System::Net::Http::HttpClient::GetStringAsync
// Il2CppName: GetStringAsync
template<>
struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<System::Threading::Tasks::Task_1<::Il2CppString*>* (System::Net::Http::HttpClient::*)(::Il2CppString*)>(&System::Net::Http::HttpClient::GetStringAsync)> {
    static const MethodInfo* get() {
        static auto* requestUri = &::il2cpp_utils::GetClassFromName("System", "String")->byval_arg;
        return ::il2cpp_utils::FindMethod(classof(System::Net::Http::HttpClient*), "GetStringAsync", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{requestUri});
    }
};
// Writing MetadataGetter for method: System::Net::Http::HttpClient::$$n__0
// Il2CppName: <>n__0
template<>
struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<System::Threading::Tasks::Task_1<System::Net::Http::HttpResponseMessage*>* (System::Net::Http::HttpClient::*)(System::Net::Http::HttpRequestMessage*, System::Threading::CancellationToken)>(&System::Net::Http::HttpClient::$$n__0)> {
    static const MethodInfo* get() {
        static auto* request = &::il2cpp_utils::GetClassFromName("System.Net.Http", "HttpRequestMessage")->byval_arg;
        static auto* cancellationToken = &::il2cpp_utils::GetClassFromName("System.Threading", "CancellationToken")->byval_arg;
        return ::il2cpp_utils::FindMethod(classof(System::Net::Http::HttpClient*), "<>n__0", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{request, cancellationToken});
    }
};
// Writing MetadataGetter for method: System::Net::Http::HttpClient::New_ctor
// Il2CppName: .ctor
// Cannot get method pointer of value based method overload from template for constructor!
// Try using FindMethod instead!
// Writing MetadataGetter for method: System::Net::Http::HttpClient::New_ctor
// Il2CppName: .ctor
// Cannot get method pointer of value based method overload from template for constructor!
// Try using FindMethod instead!
// Writing MetadataGetter for method: System::Net::Http::HttpClient::Dispose
// Il2CppName: Dispose
template<>
struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<void (System::Net::Http::HttpClient::*)(bool)>(&System::Net::Http::HttpClient::Dispose)> {
    static const MethodInfo* get() {
        static auto* disposing = &::il2cpp_utils::GetClassFromName("System", "Boolean")->byval_arg;
        return ::il2cpp_utils::FindMethod(classof(System::Net::Http::HttpClient*), "Dispose", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{disposing});
    }
};
// Writing MetadataGetter for method: System::Net::Http::HttpClient::SendAsync
// Il2CppName: SendAsync
template<>
struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<System::Threading::Tasks::Task_1<System::Net::Http::HttpResponseMessage*>* (System::Net::Http::HttpClient::*)(System::Net::Http::HttpRequestMessage*, System::Threading::CancellationToken)>(&System::Net::Http::HttpClient::SendAsync)> {
    static const MethodInfo* get() {
        static auto* request = &::il2cpp_utils::GetClassFromName("System.Net.Http", "HttpRequestMessage")->byval_arg;
        static auto* cancellationToken = &::il2cpp_utils::GetClassFromName("System.Threading", "CancellationToken")->byval_arg;
        return ::il2cpp_utils::FindMethod(classof(System::Net::Http::HttpClient*), "SendAsync", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{request, cancellationToken});
    }
};
#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
// Translation source strings for the "bitcoin-core" context. QT_TRANSLATE_NOOP marks
// each literal for lupdate/linguist without translating at this point; the texts here
// are translation keys and must stay byte-identical to their uses elsewhere.
static const char UNUSED *bitcoin_strings[] = {
// General / help text
QT_TRANSLATE_NOOP("bitcoin-core", "CNYF version"),
QT_TRANSLATE_NOOP("bitcoin-core", "Usage:"),
QT_TRANSLATE_NOOP("bitcoin-core", "Send command to -server or cnyfund"),
QT_TRANSLATE_NOOP("bitcoin-core", "List commands"),
QT_TRANSLATE_NOOP("bitcoin-core", "Get help for a command"),
QT_TRANSLATE_NOOP("bitcoin-core", "Options:"),
QT_TRANSLATE_NOOP("bitcoin-core", "This help message"),
QT_TRANSLATE_NOOP("bitcoin-core", "Specify configuration file (default: cnyfund.conf)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Specify pid file (default: cnyfund.pid)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Specify data directory"),
QT_TRANSLATE_NOOP("bitcoin-core", "Specify wallet file (within data directory)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Set database cache size in megabytes (default: 25)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Set database disk log size in megabytes (default: 100)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Specify connection timeout in milliseconds (default: 5000)"),
// Network options
QT_TRANSLATE_NOOP("bitcoin-core", "Connect through SOCKS5 proxy"),
QT_TRANSLATE_NOOP("bitcoin-core", "Use proxy to reach tor hidden services (default: same as -proxy)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Allow DNS lookups for -addnode, -seednode and -connect"),
QT_TRANSLATE_NOOP("bitcoin-core", "Listen for connections on <port> (default: 15714 or testnet: 25714)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Maintain at most <n> connections to peers (default: 125)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Add a node to connect to and attempt to keep the connection open"),
QT_TRANSLATE_NOOP("bitcoin-core", "Connect only to the specified node(s)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Connect to a node to retrieve peer addresses, and disconnect"),
QT_TRANSLATE_NOOP("bitcoin-core", "Specify your own public address"),
QT_TRANSLATE_NOOP("bitcoin-core", "Only connect to nodes in network <net> (IPv4, IPv6 or Tor)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Discover own IP address (default: 1 when listening and no -externalip)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Accept connections from outside (default: 1 if no -proxy or -connect)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Bind to given address. Use [host]:port notation for IPv6"),
QT_TRANSLATE_NOOP("bitcoin-core", "" "Query for peer addresses via DNS lookup, if low on addresses (default: 1 " "unless -connect)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Always query for peer addresses via DNS lookup (default: 0)"),
QT_TRANSLATE_NOOP("bitcoin-core", "" "Sync time with other nodes. Disable if time on your system is precise e.g. " "syncing with NTP (default: 1)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Threshold for disconnecting misbehaving peers (default: 100)"),
QT_TRANSLATE_NOOP("bitcoin-core", "" "Number of seconds to keep misbehaving peers from reconnecting (default: " "86400)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Maximum per-connection receive buffer, <n>*1000 bytes (default: 5000)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Maximum per-connection send buffer, <n>*1000 bytes (default: 1000)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Use UPnP to map the listening port (default: 1 when listening)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Use UPnP to map the listening port (default: 0)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Fee per KB to add to transactions you send"),
QT_TRANSLATE_NOOP("bitcoin-core", "" "When creating transactions, ignore inputs with value less than this " "(default: 0.01)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Accept command line and JSON-RPC commands"),
QT_TRANSLATE_NOOP("bitcoin-core", "Run in the background as a daemon and accept commands"),
QT_TRANSLATE_NOOP("bitcoin-core", "Use the test network"),
// Debugging options
QT_TRANSLATE_NOOP("bitcoin-core", "" "Output debugging information (default: 0, supplying <category> is optional)"),
QT_TRANSLATE_NOOP("bitcoin-core", "If <category> is not supplied, output all debugging information."),
QT_TRANSLATE_NOOP("bitcoin-core", "<category> can be:"),
QT_TRANSLATE_NOOP("bitcoin-core", "Prepend debug output with timestamp"),
QT_TRANSLATE_NOOP("bitcoin-core", "Shrink debug.log file on client startup (default: 1 when no -debug)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Send trace/debug info to console instead of debug.log file"),
QT_TRANSLATE_NOOP("bitcoin-core", "" "Enter regression test mode, which uses a special chain in which blocks can " "be solved instantly. This is intended for regression testing tools and app " "development."),
// RPC options
QT_TRANSLATE_NOOP("bitcoin-core", "Username for JSON-RPC connections"),
QT_TRANSLATE_NOOP("bitcoin-core", "Password for JSON-RPC connections"),
QT_TRANSLATE_NOOP("bitcoin-core", "" "Listen for JSON-RPC connections on <port> (default: 15715 or testnet: 25715)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Allow JSON-RPC connections from specified IP address"),
QT_TRANSLATE_NOOP("bitcoin-core", "Send commands to node running on <ip> (default: 127.0.0.1)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Wait for RPC server to start"),
QT_TRANSLATE_NOOP("bitcoin-core", "Set the number of threads to service RPC calls (default: 4)"),
QT_TRANSLATE_NOOP("bitcoin-core", "" "Execute command when the best block changes (%s in cmd is replaced by block " "hash)"),
QT_TRANSLATE_NOOP("bitcoin-core", "" "Execute command when a wallet transaction changes (%s in cmd is replaced by " "TxID)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Require a confirmations for change (default: 0)"),
QT_TRANSLATE_NOOP("bitcoin-core", "" "Execute command when a relevant alert is received (%s in cmd is replaced by " "message)"),
// Wallet options
QT_TRANSLATE_NOOP("bitcoin-core", "Upgrade wallet to latest format"),
QT_TRANSLATE_NOOP("bitcoin-core", "Set key pool size to <n> (default: 100)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Rescan the block chain for missing wallet transactions"),
QT_TRANSLATE_NOOP("bitcoin-core", "Attempt to recover private keys from a corrupt wallet.dat"),
QT_TRANSLATE_NOOP("bitcoin-core", "How many blocks to check at startup (default: 500, 0 = all)"),
QT_TRANSLATE_NOOP("bitcoin-core", "How thorough the block verification is (0-6, default: 1)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Imports blocks from external blk000?.dat file"),
QT_TRANSLATE_NOOP("bitcoin-core", "Keep at most <n> MiB of unconnectable blocks in memory (default: %u)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Block creation options:"),
QT_TRANSLATE_NOOP("bitcoin-core", "Set minimum block size in bytes (default: 0)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Set maximum block size in bytes (default: 250000)"),
// SSL options
QT_TRANSLATE_NOOP("bitcoin-core", "SSL options: (see the Bitcoin Wiki for SSL setup instructions)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Use OpenSSL (https) for JSON-RPC connections"),
QT_TRANSLATE_NOOP("bitcoin-core", "Server certificate file (default: server.cert)"),
QT_TRANSLATE_NOOP("bitcoin-core", "Server private key (default: server.pem)"),
QT_TRANSLATE_NOOP("bitcoin-core", "" "Acceptable ciphers (default: TLSv1.2+HIGH:TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!" "3DES:@STRENGTH)"),
// Runtime warnings / errors
QT_TRANSLATE_NOOP("bitcoin-core", "Warning: Deprecated argument -debugnet ignored, use -debug=net"),
QT_TRANSLATE_NOOP("bitcoin-core", "" "Error: Unsupported argument -socks found. Setting SOCKS version isn't " "possible anymore, only SOCKS5 proxies are supported."),
QT_TRANSLATE_NOOP("bitcoin-core", "Invalid amount for -paytxfee=<amount>: '%s'"),
QT_TRANSLATE_NOOP("bitcoin-core", "" "Warning: -paytxfee is set very high! This is the transaction fee you will " "pay if you send a transaction."),
QT_TRANSLATE_NOOP("bitcoin-core", "Invalid amount for -mininput=<amount>: '%s'"),
QT_TRANSLATE_NOOP("bitcoin-core", "Initialization sanity check failed. CNYF is shutting down."),
QT_TRANSLATE_NOOP("bitcoin-core", "Wallet %s resides outside data directory %s."),
QT_TRANSLATE_NOOP("bitcoin-core", "" "Cannot obtain a lock on data directory %s. CNYF is probably already " "running."),
QT_TRANSLATE_NOOP("bitcoin-core", "Verifying database integrity..."),
QT_TRANSLATE_NOOP("bitcoin-core", "Error initializing wallet database environment %s!"),
QT_TRANSLATE_NOOP("bitcoin-core", "" "Warning: wallet.dat corrupt, data salvaged! Original wallet.dat saved as " "wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect " "you should restore from a backup."),
QT_TRANSLATE_NOOP("bitcoin-core", "wallet.dat corrupt, salvage failed"),
QT_TRANSLATE_NOOP("bitcoin-core", "Unknown network specified in -onlynet: '%s'"),
QT_TRANSLATE_NOOP("bitcoin-core", "Invalid -proxy address: '%s'"),
QT_TRANSLATE_NOOP("bitcoin-core", "Invalid -tor address: '%s'"),
QT_TRANSLATE_NOOP("bitcoin-core", "Cannot resolve -bind address: '%s'"),
QT_TRANSLATE_NOOP("bitcoin-core", "Failed to listen on any port. Use -listen=0 if you want this."),
QT_TRANSLATE_NOOP("bitcoin-core", "Cannot resolve -externalip address: '%s'"),
QT_TRANSLATE_NOOP("bitcoin-core", "Invalid amount for -reservebalance=<amount>"),
// Startup progress
QT_TRANSLATE_NOOP("bitcoin-core", "Loading block index..."),
QT_TRANSLATE_NOOP("bitcoin-core", "Error loading block database"),
QT_TRANSLATE_NOOP("bitcoin-core", "Loading wallet..."),
QT_TRANSLATE_NOOP("bitcoin-core", "Error loading wallet.dat: Wallet corrupted"),
QT_TRANSLATE_NOOP("bitcoin-core", "" "Warning: error reading wallet.dat! All keys read correctly, but transaction " "data or address book entries might be missing or incorrect."),
QT_TRANSLATE_NOOP("bitcoin-core", "Error loading wallet.dat: Wallet requires newer version of CNYF"),
QT_TRANSLATE_NOOP("bitcoin-core", "Wallet needed to be rewritten: restart CNYF to complete"),
QT_TRANSLATE_NOOP("bitcoin-core", "Error loading wallet.dat"),
QT_TRANSLATE_NOOP("bitcoin-core", "Cannot downgrade wallet"),
QT_TRANSLATE_NOOP("bitcoin-core", "Cannot write default address"),
QT_TRANSLATE_NOOP("bitcoin-core", "Rescanning..."),
QT_TRANSLATE_NOOP("bitcoin-core", "Loading addresses..."),
QT_TRANSLATE_NOOP("bitcoin-core", "Done loading"),
QT_TRANSLATE_NOOP("bitcoin-core", "Warning: This version is obsolete, upgrade required!"),
QT_TRANSLATE_NOOP("bitcoin-core", "Error: Disk space is low!"),
QT_TRANSLATE_NOOP("bitcoin-core", "" "This is a pre-release test build - use at your own risk - do not use for " "mining or merchant applications"),
QT_TRANSLATE_NOOP("bitcoin-core", "Error"),
QT_TRANSLATE_NOOP("bitcoin-core", "Warning"),
QT_TRANSLATE_NOOP("bitcoin-core", "Information"),
QT_TRANSLATE_NOOP("bitcoin-core", "" "Warning: Please check that your computer's date and time are correct! If " "your clock is wrong CNYF will not work properly."),
QT_TRANSLATE_NOOP("bitcoin-core", "" "Unable to bind to %s on this computer. CNYF is probably already running."),
QT_TRANSLATE_NOOP("bitcoin-core", "Unable to bind to %s on this computer (bind returned error %d, %s)"),
// Transaction errors
QT_TRANSLATE_NOOP("bitcoin-core", "Error: Wallet locked, unable to create transaction!"),
QT_TRANSLATE_NOOP("bitcoin-core", "Error: Wallet unlocked for staking only, unable to create transaction."),
QT_TRANSLATE_NOOP("bitcoin-core", "" "Error: This transaction requires a transaction fee of at least %s because of " "its amount, complexity, or use of recently received funds!"),
QT_TRANSLATE_NOOP("bitcoin-core", "Error: Transaction creation failed!"),
QT_TRANSLATE_NOOP("bitcoin-core", "Sending..."),
QT_TRANSLATE_NOOP("bitcoin-core", "" "Error: The transaction was rejected! This might happen if some of the coins " "in your wallet were already spent, such as if you used a copy of wallet.dat " "and coins were spent in the copy but not marked as spent here."),
QT_TRANSLATE_NOOP("bitcoin-core", "Invalid amount"),
QT_TRANSLATE_NOOP("bitcoin-core", "Insufficient funds"),
// RPC configuration help
QT_TRANSLATE_NOOP("bitcoin-core", "" "You must set rpcpassword=<password> in the configuration file:\n" "%s\n" "If the file does not exist, create it with owner-readable-only file " "permissions."),
QT_TRANSLATE_NOOP("bitcoin-core", "To use the %s option"),
QT_TRANSLATE_NOOP("bitcoin-core", "" "%s, you must set a rpcpassword in the configuration file:\n" "%s\n" "It is recommended you use the following random password:\n" "rpcuser=cnyfundrpc\n" "rpcpassword=%s\n" "(you do not need to remember this password)\n" "The username and password MUST NOT be the same.\n" "If the file does not exist, create it with owner-readable-only file " "permissions.\n" "It is also recommended to set alertnotify so you are notified of problems;\n" "for example: alertnotify=echo %%s | mail -s \"CNYF Alert\" admin@foo." "com\n"),
QT_TRANSLATE_NOOP("bitcoin-core", "" "An error occurred while setting up the RPC port %u for listening on IPv6, " "falling back to IPv4: %s"),
QT_TRANSLATE_NOOP("bitcoin-core", "" "An error occurred while setting up the RPC port %u for listening on IPv4: %s"),
};
// Unit tests for cv_geometry construction (fvm_layout): CV partitioning of
// cable-cell morphologies from end-point locsets, parent/child CV indexing,
// CV-from-location lookup, and multi-cell geometry concatenation.
#include <algorithm>
#include <utility>

#include <arbor/util/optional.hpp>
#include <arbor/cable_cell.hpp>
#include <arbor/morph/morphology.hpp>
#include <arbor/morph/locset.hpp>
#include <arbor/morph/region.hpp>

#include "fvm_layout.hpp"
#include "util/rangeutil.hpp"

#include "common.hpp"
#include "common_morphologies.hpp"
#include "morph_pred.hpp"
#include "../common_cells.hpp"

using namespace arb;
using util::make_span;

// Consistency check on a cv_geometry's tree indexing:
// * each CV's child list is sorted;
// * every listed child names this CV as its parent;
// * total child count matches cv_children, and equals #CVs minus one root CV
//   per non-empty cell.
::testing::AssertionResult verify_cv_children(const cv_geometry& g) {
    unsigned visited_children = 0;
    for (unsigned i = 0; i<g.size(); ++i) {
        if (!util::is_sorted(g.children(i))) {
            return ::testing::AssertionFailure() << "CV " << i << " has unsorted sequence of child CVs";
        }
        for (auto cv: g.children(i)) {
            if ((fvm_index_type)i != g.cv_parent.at(cv)) {
                return ::testing::AssertionFailure() << "CV " << i << " has child CV " << cv
                    << " which has parent " << g.cv_parent.at(cv);
            }
            ++visited_children;
        }
    }
    if (g.cv_children.size()!=visited_children) {
        return ::testing::AssertionFailure() << "geometry child CV count " << g.cv_children.size()
            << " does not equal number of visited children " << visited_children;
    }

    unsigned n_nonempty_cells = 0;
    for (auto c: util::make_span(g.n_cell())) {
        if (!g.cell_cvs(c).empty()) {
            ++n_nonempty_cells;
        }
    }
    if (g.cv_children.size()!=g.size()-n_nonempty_cells) {
        return ::testing::AssertionFailure() << "child CV count " << g.cv_children.size()
            << " plus root CV count " << n_nonempty_cells
            << " does not equal total number of CVs " << g.size();
    }
    return ::testing::AssertionSuccess();
}

namespace arb {
namespace cv_prefer {
// Printer for cv_prefer::type so SCOPED_TRACE output is readable.
std::ostream& operator<<(std::ostream& out, ::arb::cv_prefer::type p) {
    switch (p) {
    case cv_proximal: return out << "cv_proximal";
    case cv_distal: return out << "cv_distal";
    case cv_empty: return out << "cv_empty";
    case cv_nonempty: return out << "cv_nonempty";
    default: return out;
    }
}
}
}

TEST(cv_geom, empty) {
    using namespace common_morphology;

    cable_cell empty_cell{m_empty};
    cv_geometry geom = cv_geometry_from_ends(empty_cell, ls::nil());
    EXPECT_TRUE(verify_cv_children(geom));
    EXPECT_TRUE(geom.cv_parent.empty());
    EXPECT_TRUE(geom.cv_cables.empty());
    EXPECT_TRUE(geom.cv_cables_divs.empty());

    EXPECT_EQ(0u, geom.size()); // size()/empty() reflects number of CVs.
    EXPECT_EQ(1u, geom.n_cell()); // can have no CVs but >0 cells.
}

// Regions compare equal iff they thingify to the same extent under provider p.
static bool region_eq(const mprovider& p, region a, region b) {
    return thingify(a, p)==thingify(b, p);
}

TEST(cv_geom, trivial) {
    using namespace common_morphology;

    for (auto& p: test_morphologies) {
        if (p.second.empty()) continue;
        SCOPED_TRACE(p.first);

        cable_cell cell{p.second};
        auto& m = cell.morphology();

        // Equivalent ways of specifying one CV comprising whole cell:
        cv_geometry geom1 = cv_geometry_from_ends(cell, ls::nil());
        cv_geometry geom2 = cv_geometry_from_ends(cell, ls::terminal());

        EXPECT_TRUE(verify_cv_children(geom1));
        EXPECT_TRUE(verify_cv_children(geom2));

        EXPECT_EQ(1u, geom1.size());
        EXPECT_EQ(geom1.cv_cables, geom2.cv_cables);

        // These are equivalent too, if there is a single root branch.
        cv_geometry geom3 = cv_geometry_from_ends(cell, ls::root());
        cv_geometry geom4 = cv_geometry_from_ends(cell, join(ls::root(), ls::terminal()));

        EXPECT_TRUE(verify_cv_children(geom3));
        EXPECT_TRUE(verify_cv_children(geom4));

        EXPECT_EQ(geom3.cv_cables, geom4.cv_cables);
        if (m.branch_children(mnpos).size()==1) {
            EXPECT_EQ(geom1.cv_cables, geom4.cv_cables);
        }

        // The single CV must cover the entire cell.
        mcable_list geom1_cables = util::assign_from(geom1.cables(0));
        EXPECT_TRUE(region_eq(cell.provider(), reg::all(), geom1_cables));
    }
}

TEST(cv_geom, one_cv_per_branch) {
    using namespace common_morphology;

    // Extent covering a point together with all cables meeting it (fork cover).
    auto super = [] (const arb::morphology& m, arb::mcable c) {
        return thingify(arb::reg::super(arb::region(c)), arb::mprovider(m)).cables();
    };

    for (auto& p: test_morphologies) {
        if (p.second.empty()) continue;
        SCOPED_TRACE(p.first);

        cable_cell cell{p.second};
        auto& m = cell.morphology();

        cv_geometry geom = cv_geometry_from_ends(cell, sum(ls::on_branches(0), ls::on_branches(1)));
        EXPECT_TRUE(verify_cv_children(geom));

        // Expect trivial CVs at every fork point, and single-cable CVs for each branch.
        std::vector<unsigned> seen_branches(m.num_branches(), 0);
        auto n_branch_child = [&m](msize_t b) { return m.branch_children(b).size(); };
        for (auto i: make_span(geom.size())) {
            auto cables = geom.cables(i);
            auto c = cables.front();

            if (c.prox_pos==c.dist_pos) {
                // Trivial (zero-length) CV: must sit on a fork with >1 child.
                EXPECT_LT(1u, cables.size());
                if (c.branch==0 && c.prox_pos==0) {
                    EXPECT_TRUE(n_branch_child(mnpos)>1);
                }
                else {
                    EXPECT_EQ(1., c.prox_pos);
                    EXPECT_TRUE(n_branch_child(c.branch)>1);
                }
                // Cables in trivial CV should be the same as those in the extent over the point.
                EXPECT_TRUE(testing::seq_eq(super(m,c), cables));
            }
            else {
                // Non-trivial CV: exactly one full-branch cable, seen only once.
                ASSERT_EQ(1u, cables.size());
                ++seen_branches[c.branch];
                EXPECT_EQ(1., seen_branches[c.branch]);
                EXPECT_EQ(0., c.prox_pos);
                EXPECT_EQ(1., c.dist_pos);

                // Confirm parent CV is fork CV:
                if (i>0) {
                    auto fork_ext = super(m, {c.branch, 0});
                    mcable_list pcables = util::assign_from(geom.cables(geom.cv_parent[i]));
                    ASSERT_TRUE(testing::cablelist_eq(fork_ext, pcables));
                }
            }
        }

        // Every branch must have been covered by exactly one CV.
        EXPECT_TRUE(std::find(seen_branches.begin(), seen_branches.end(), 0)==seen_branches.end());
    }
}

TEST(cv_geom, midpoints) {
    using namespace common_morphology;

    // Place CV boundaries at the midpoints of each branch.
    for (auto& p: test_morphologies) {
        if (p.second.empty()) continue;
        SCOPED_TRACE(p.first);

        cable_cell cell{p.second};
        auto& m = cell.morphology();

        cv_geometry geom = cv_geometry_from_ends(cell, ls::on_branches(0.5));
        EXPECT_TRUE(verify_cv_children(geom));

        // Expect CVs to be either: covering fork points, with one cable per branch
        // at the fork (for a multiple-root-branch morphology, this would be treating
        // (0, 0) as a fork); or the last halves of terminal branches or the first half
        // of a unique root branch.

        auto n_branch_child = [&m](msize_t b) { return m.branch_children(b).size(); };
        for (auto i: make_span(geom.size())) {
            auto cables = geom.cables(i);
            if (i==0) {
                // Expect initial half of single branch cell, or branched CV around (0,0).
                if (cables.size()==1) {
                    EXPECT_EQ(1u, n_branch_child(mnpos));
                    auto c = cables.front();
                    EXPECT_EQ(0u, c.branch);
                    EXPECT_EQ(0.0, c.prox_pos);
                    EXPECT_EQ(0.5, c.dist_pos);
                }
                else {
                    EXPECT_TRUE(n_branch_child(mnpos)>1);
                    for (auto& c: cables) {
                        auto x = canonical(m, mlocation{c.branch, 0.});
                        EXPECT_EQ(0u, x.branch);
                        EXPECT_EQ(0.0, c.prox_pos);
                        EXPECT_EQ(0.5, c.dist_pos);
                    }
                }
            }
            else {
                // Expect final half of terminal branch or a branched CV around an interior fork.
                if (cables.size()==1) {
                    // Terminal segment, or initial segment of 1-branch cell.
                    auto c = cables.front();
                    EXPECT_EQ(0.5, c.prox_pos);
                    EXPECT_EQ(1.0, c.dist_pos);
                    EXPECT_EQ(0u, n_branch_child(c.branch));
                }
                else {
                    // First cable is the distal half of the parent branch...
                    auto prox_cable = cables.front();
                    EXPECT_EQ(0.5, prox_cable.prox_pos);
                    EXPECT_EQ(1.0, prox_cable.dist_pos);

                    msize_t prox_branch = prox_cable.branch;
                    EXPECT_EQ(1+n_branch_child(prox_branch), cables.size());

                    // ...remaining cables are the proximal halves of its children.
                    for (unsigned j = 1; j<cables.size(); ++j) {
                        auto& c = cables[j];
                        EXPECT_EQ(0.0, c.prox_pos);
                        EXPECT_EQ(0.5, c.dist_pos);
                        auto x = canonical(m, mlocation{c.branch, 0.});
                        EXPECT_EQ(prox_branch, x.branch);
                    }
                }
            }
        }
    }
}

TEST(cv_geom, weird) {
    // m_reg_b6 has the following branch structure:
    //
    // ---0---+---1---+---3---
    //        |       |
    //        |       +---4---
    //        2       |
    //        |       +---5---
    //        |
    //
    // By placing CV boundary points at (1,0) and (4,0), we
    // should obtain 3 CVs 'o', '+' and '=' as:
    //
    //
    // oooooooo+++++++++++++++
    //        o       +
    //        o       +=======
    //        o       +
    //        o       ++++++++
    //        o
    //
    // CV 0 will comprise branches 0 and 2; CV 1 branches 1, 3, 5;
    // and CV 2 branch 4. CV 0 will also cover the fork point (0,1);
    // CV 1 will cover the fork point (1, 1).

    using C = mcable;
    using testing::seq_eq;

    cable_cell cell{common_morphology::m_reg_b6};
    cv_geometry geom = cv_geometry_from_ends(cell, mlocation_list{{1, 0}, {4,0}});

    EXPECT_TRUE(verify_cv_children(geom));
    ASSERT_EQ(3u, geom.size());

    mcable_list expected0 = {C{0u, 0., 1.}, C{1u, 0., 0.}, C{2u, 0., 1.}};
    EXPECT_TRUE(seq_eq(expected0, geom.cables(0)));

    mcable_list expected1 = {C{1u, 0., 1.}, C{3u, 0., 1.}, C{4u, 0., 0.}, C{5u, 0., 1.}};
    EXPECT_TRUE(seq_eq(expected1, geom.cables(1)));

    mcable_list expected2 = {C{4u, 0., 1.}};
    EXPECT_TRUE(seq_eq(expected2, geom.cables(2)));
}

TEST(cv_geom, location_cv) {
    using namespace common_morphology;

    cable_cell cell{m_reg_b6};
    auto& m = cell.morphology();

    // Extent of a CV, as an mextent for intersection tests.
    auto cv_extent = [](const cv_geometry& geom, auto cv) {
        mcable_list cl;
        util::assign(cl, geom.cables(cv));
        return mextent(cl);
    };

    // Extent covering a point together with all cables meeting it (fork cover).
    auto super = [] (const arb::morphology& m, arb::mcable c) {
        return thingify(arb::reg::super(arb::region(c)), arb::mprovider(m)).cables();
    };

    // Two CVs per branch, plus trivial CV at forks.
    cv_geometry geom = cv_geometry_from_ends(cell,
        join(ls::on_branches(0.), ls::on_branches(0.5), ls::on_branches(1.)));

    // Confirm CVs are either trivial or a single cable covering half a branch.
    for (auto cv: geom.cell_cvs(0)) {
        auto cables = geom.cables(cv);
        if (cables.size()==1u) {
            // Half branch cable.
            mcable cable = cables.front();
            ASSERT_TRUE((cable.prox_pos==0 && cable.dist_pos==0.5 ) || (cable.prox_pos==0.5 && cable.dist_pos==1.));
        }
        else {
            // Trivial CV over fork point.
            mcable cable0 = cables.front();
            ASSERT_TRUE(cable0.prox_pos==cable0.dist_pos);

            mcable_list clist = util::assign_from(cables);
            ASSERT_TRUE(testing::cablelist_eq(super(m, cable0), clist));
        }
    }

    // For positions strictly within a CV extent, CV preference should make no difference.
    for (auto prefer: {cv_prefer::cv_distal, cv_prefer::cv_proximal, cv_prefer::cv_nonempty, cv_prefer::cv_empty}) {
        SCOPED_TRACE(prefer);
        for (auto bid: util::make_span(m.num_branches())) {
            for (double pos: {0.3, 0.7}) {
                mlocation loc{bid, pos};
                SCOPED_TRACE(loc);

                auto cv = geom.location_cv(0, loc, prefer);
                ASSERT_TRUE(cv_extent(geom, cv).intersects(loc));
                ASSERT_EQ(1u, geom.cables(cv).size());

                mcable cable = geom.cables(cv).front();
                EXPECT_TRUE(cable.branch==loc.branch);
                EXPECT_TRUE(cable.prox_pos<cable.dist_pos);
            }
        }
    }

    // For positions in the middle of a branch, we should get distal CV unless
    // CV preference is `cv_proximal`.
    for (auto prefer: {cv_prefer::cv_distal, cv_prefer::cv_proximal, cv_prefer::cv_nonempty, cv_prefer::cv_empty}) {
        SCOPED_TRACE(prefer);
        for (auto bid: util::make_span(m.num_branches())) {
            mlocation loc{bid, 0.5};
            SCOPED_TRACE(loc);

            auto cv = geom.location_cv(0, loc, prefer);
            ASSERT_TRUE(cv_extent(geom, cv).intersects(loc));
            ASSERT_EQ(1u, geom.cables(cv).size());

            mcable cable = geom.cables(cv).front();
            EXPECT_TRUE(cable.branch==loc.branch);
            EXPECT_TRUE(cable.prox_pos<cable.dist_pos);
            if (prefer==cv_prefer::cv_proximal) {
                EXPECT_EQ(0., cable.prox_pos);
            }
            else {
                EXPECT_EQ(0.5, cable.prox_pos);
            }
        }
    }

    // For the head of a non-root branch, we should get the trivial CV over the
    // fork for `cv_proximal` or `cv_empty`; otherwise the CV over the first
    // half of the branch.
    for (auto prefer: {cv_prefer::cv_distal, cv_prefer::cv_proximal, cv_prefer::cv_nonempty, cv_prefer::cv_empty}) {
        SCOPED_TRACE(prefer);
        for (auto bid: util::make_span(m.num_branches())) {
            if (m.branch_parent(bid)==mnpos) continue;

            mlocation loc{bid, 0.};
            SCOPED_TRACE(loc);

            auto cv = geom.location_cv(0, loc, prefer);
            ASSERT_TRUE(cv_extent(geom, cv).intersects(loc));
            auto cables = geom.cables(cv);
            switch (prefer) {
            case cv_prefer::cv_proximal:
            case cv_prefer::cv_empty:
                EXPECT_NE(1u, cables.size());
                break;
            case cv_prefer::cv_distal:
            case cv_prefer::cv_nonempty:
                EXPECT_EQ(1u, cables.size());
                EXPECT_EQ(0.5, cables.front().dist_pos);
                break;
            }
        }
    }

    // For the tail of a non-terminal branch, we should get the trivial CV over the
    // fork for `cv_distal` or `cv_empty`; otherwise the CV over the second
    // half of the branch.
    for (auto prefer: {cv_prefer::cv_distal, cv_prefer::cv_proximal, cv_prefer::cv_nonempty, cv_prefer::cv_empty}) {
        SCOPED_TRACE(prefer);
        for (auto bid: util::make_span(m.num_branches())) {
            if (m.branch_children(bid).empty()) continue;

            mlocation loc{bid, 1.};
            SCOPED_TRACE(loc);

            auto cv = geom.location_cv(0, loc, prefer);
            ASSERT_TRUE(cv_extent(geom, cv).intersects(loc));
            auto cables = geom.cables(cv);
            switch (prefer) {
            case cv_prefer::cv_proximal:
            case cv_prefer::cv_nonempty:
                EXPECT_EQ(1u, cables.size());
                EXPECT_EQ(0.5, cables.front().prox_pos);
                break;
            case cv_prefer::cv_distal:
            case cv_prefer::cv_empty:
                EXPECT_NE(1u, cables.size());
                break;
            }
        }
    }
}

TEST(cv_geom, multicell) {
    using namespace common_morphology;
    using index_type = cv_geometry::index_type;

    cable_cell cell = cable_cell{m_reg_b6};
    cv_geometry geom = cv_geometry_from_ends(cell, ls::on_branches(0.5));

    // Appending the same geometry to a copy of itself should yield two cells
    // whose CV data are identical modulo an index offset of n_cv.
    unsigned n_cv = geom.size();
    cv_geometry geom2 = geom;
    append(geom2, geom);

    EXPECT_TRUE(verify_cv_children(geom));
    ASSERT_EQ(2*n_cv, geom2.size());
    for (unsigned i = 0; i<n_cv; ++i) {
        EXPECT_EQ(geom.cv_parent[i], geom2.cv_parent[i]);

        if (geom2.cv_parent[i]==-1) {
            EXPECT_EQ(-1, geom2.cv_parent[i+n_cv]);
        }
        else {
            EXPECT_EQ(geom2.cv_parent[i]+(int)n_cv, geom2.cv_parent[i+n_cv]);
        }
        EXPECT_EQ(0, geom2.cv_to_cell[i]);
        EXPECT_EQ(1, geom2.cv_to_cell[i+n_cv]);

        mcable_list cables, cables1, cables2;
        util::assign(cables, geom.cables(i));
        util::assign(cables1, geom2.cables(i));
        util::assign(cables2, geom2.cables(i+n_cv));
        EXPECT_EQ(cables, cables1);
        EXPECT_EQ(cables, cables2);
    }

    EXPECT_EQ((std::pair<index_type, index_type>(0, n_cv)), geom2.cell_cv_interval(0));
    EXPECT_EQ((std::pair<index_type, index_type>(n_cv, 2*n_cv)), geom2.cell_cv_interval(1));
}
/* Copyright 2020 Canaan Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <nncase/ir/ops/conv2d.h>
#include <nncase/ir/ops/pad.h>
#include <nncase/ir/visitor.h>
#include <nncase/transforms/neutral/fuse_pad.h>

using namespace nncase;
using namespace nncase::ir;
using namespace nncase::ir::transforms;

// Match a pad(H/W, constant, value 0) node directly feeding a conv2d, so the
// spatial padding can be folded into the convolution's own padding attributes.
// Matched inputs: [0] the pad's input connector, [1] conv weights, [2] conv bias.
bool fuse_pad_conv2d_transform::on_try_match(node &node, transform_context &context)
{
    if (node.runtime_opcode() == op_pad)
    {
        auto &p = static_cast<pad &>(node);
        // Only non-negative H/W padding (dims 2 and 3), with at least one
        // positive amount, zero-valued constant mode; batch/channel untouched.
        if (p.paddings().size() == 4
            && p.paddings()[2].before >= 0 && p.paddings()[2].after >= 0
            && p.paddings()[3].before >= 0 && p.paddings()[3].after >= 0
            && (p.paddings()[2].sum() > 0 || p.paddings()[3].sum() > 0)
            && p.pad_mode() == pad_constant && p.pad_value().as<float>() == 0.f)
        {
            if (auto conv = try_get_direct_child<conv2d>(p))
            {
                context.inputs.emplace_back(&p.input());
                context.inputs.emplace_back(&conv->weights());
                context.inputs.emplace_back(&conv->bias());
                context.outputs.emplace_back(&conv->output());

                context.matched_nodes.emplace_back(&p);
                context.matched_nodes.emplace_back(conv);
                return true;
            }
        }
    }
    return false;
}

// Rewrite pad -> conv2d as (now H/W-neutral) pad -> conv2d with the H/W pad
// amounts moved into the conv's padding_h/padding_w.
void fuse_pad_conv2d_transform::process(transform_context &context)
{
    // NOTE: `output` is the connector feeding the old pad (its upstream output);
    // `inputs` are the consumers of the old conv's output.
    auto &output = *context.inputs[0]->connection();
    auto &weights = *context.inputs[1]->connection();
    auto &bias = *context.inputs[2]->connection();
    auto inputs = context.outputs[0]->connections();
    auto &old_p = static_cast<pad &>(*context.matched_nodes[0]);
    auto &old_conv = static_cast<conv2d &>(*context.matched_nodes[1]);

    // Move positive H/W padding from the pad op onto the conv's own padding;
    // the pad keeps only its (possibly zero) remaining amounts.
    auto paddings = old_p.paddings();
    std::array<padding, 2> conv_paddings { old_conv.padding_h(), old_conv.padding_w() };
    for (size_t i = 2; i < 4; i++)
    {
        auto &src = paddings[i];
        auto &dest = conv_paddings[i - 2];
        if (src.before > 0)
        {
            dest.before += src.before;
            src.before = 0;
        }
        if (src.after > 0)
        {
            dest.after += src.after;
            src.after = 0;
        }
    }

    // Re-emplace both nodes with the adjusted padding configuration.
    auto p = context.graph.emplace<pad>(old_p.output().type(), output.shape(), paddings, old_p.pad_mode(), old_p.pad_value());
    p->name(old_p.name());
    auto conv = context.graph.emplace<conv2d>(p->output().shape(), old_conv.weights().shape(), old_conv.groups(),
        conv_paddings[0], conv_paddings[1], old_conv.stride_h(), old_conv.stride_w(),
        old_conv.dilation_h(), old_conv.dilation_w(), old_conv.fused_activation());
    conv->name(old_conv.name());

    conv->input().connect(p->output());
    conv->weights().connect(weights);
    conv->bias().connect(bias);
    p->input().connect(output);

    // Reconnect all downstream consumers to the new conv's output.
    for (auto &in : dup(inputs))
        in->connect(conv->output());
}
#include "WALL.h"
#include "Simulation.h"
#include "Utils/Random.h"

// Polymorphic copy hook: returns a heap-allocated copy via the copy constructor.
Element* WALL::clone_impl() const { return new WALL(*this); }

// Primary constructor: registers identity, appearance and the Game-of-Life
// style survive/birth rule for this element, then parses the rule string.
WALL::WALL(Simulation& sim)
{
    identifier = EL_WALL;
    name = "WALL";
    description = "WALL GOL Rule";
    rule_string = "s2345/b45678"; // survive on 2-5 neighbours, born on 4-8.
    state = ST_SOLID;
    colors = { sf::Color(33, 77, 255) };
    color = colors[0];
    process_rules(); // Parse rule_string into the element's rule tables.
    this->sim = &sim;
}

// Copy constructor: copies shared Element state plus the rule string, then
// re-derives the parsed rules.
// NOTE(review): unlike the primary constructor this never assigns `sim`;
// if Element::element_copy() does not cover it, clones carry a default/stale
// simulation pointer — verify against Element's copy helper.
WALL::WALL(const WALL& rhs)
{
    Element::element_copy(rhs);
    rule_string = rhs.rule_string;
    process_rules();
}

WALL::~WALL() { }
/* * Copyright (c) 2018 MariaDB Corporation Ab * * Use of this software is governed by the Business Source License included * in the LICENSE.TXT file and at www.mariadb.com/bsl11. * * Change Date: 2025-04-28 * * On the date above, in accordance with the Business Source License, use * of this software will be governed by version 2 or later of the General * Public License. */ #pragma once /** * @file galeramon.hh - The Galera cluster monitor */ #include <maxscale/ccdefs.hh> #include <unordered_map> #include <maxscale/monitor.hh> /** * Galera status variables */ struct GaleraNode { int joined; /**< Node is in sync with the cluster */ int local_index; /**< Node index */ int local_state; /**< Node state */ int cluster_size; /**< The cluster size*/ std::string cluster_uuid; /**< Cluster UUID */ std::string gtid_binlog_pos; std::string gtid_current_pos; bool read_only = false; int master_id; int server_id; }; typedef std::unordered_map<mxs::MonitorServer*, GaleraNode> NodeMap; class GaleraMonitor : public maxscale::MonitorWorkerSimple { public: GaleraMonitor(const GaleraMonitor&) = delete; GaleraMonitor& operator=(const GaleraMonitor&) = delete; ~GaleraMonitor(); static GaleraMonitor* create(const std::string& name, const std::string& module); json_t* diagnostics() const; json_t* diagnostics(mxs::MonitorServer* server) const override; protected: bool configure(const mxs::ConfigParameters* param); bool has_sufficient_permissions(); void update_server_status(mxs::MonitorServer* monitored_server); void pre_tick(); void post_tick(); private: int m_disableMasterFailback; /**< Monitor flag for Galera Cluster Master failback */ int m_availableWhenDonor; /**< Monitor flag for Galera Cluster Donor availability */ bool m_disableMasterRoleSetting; /**< Monitor flag to disable setting master role */ bool m_root_node_as_master; /**< Whether we require that the Master should * have a wsrep_local_index of 0 */ bool m_use_priority; /**< Use server priorities */ bool m_set_donor_nodes; 
/**< set the wrep_sst_donor variable with an * ordered list of nodes */ std::string m_cluster_uuid; /**< The Cluster UUID */ bool m_log_no_members; /**< Should we log if no member are found. */ NodeMap m_info; /**< Contains Galera Cluster variables of all nodes */ NodeMap m_prev_info; /**< Contains the info from the previous tick */ int m_cluster_size; /**< How many nodes in the cluster */ // Prevents concurrent use that might occur during the diagnostics_json call mutable std::mutex m_lock; GaleraMonitor(const std::string& name, const std::string& module); bool detect_cluster_size(const int n_nodes, const char* candidate_uuid, const int candidate_size); mxs::MonitorServer* get_candidate_master(); void set_galera_cluster(); void update_sst_donor_nodes(int is_cluster); void calculate_cluster(); };
//===----------------------------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // UNSUPPORTED: libcpp-has-no-threads // FLAKY_TEST. // <mutex> // template <class Mutex> class unique_lock; // void lock(); #include <mutex.hxx> #include <thread.hxx> #include <cstdlib.hxx> #include <cassert.hxx> #include "test_macros.h" std::mutex m; typedef std::chrono::system_clock Clock; typedef Clock::time_point time_point; typedef Clock::duration duration; typedef std::chrono::milliseconds ms; typedef std::chrono::nanoseconds ns; void f() { std::unique_lock<std::mutex> lk(m, std::defer_lock); time_point t0 = Clock::now(); lk.lock(); time_point t1 = Clock::now(); assert(lk.owns_lock() == true); ns d = t1 - t0 - ms(250); assert(d < ms(25)); // within 25ms #ifndef TEST_HAS_NO_EXCEPTIONS try { lk.lock(); assert(false); } catch (std::system_error& e) { assert(e.code().value() == EDEADLK); } #endif lk.unlock(); lk.release(); #ifndef TEST_HAS_NO_EXCEPTIONS try { lk.lock(); assert(false); } catch (std::system_error& e) { assert(e.code().value() == EPERM); } #endif } int main(int, char**) { m.lock(); std::thread t(f); std::this_thread::sleep_for(ms(250)); m.unlock(); t.join(); return 0; }
/*
 * utility.cpp
 *
 * Created on: Apr 27, 2017
 * Author: Paul
 */

#include <os/utility.hpp>

namespace os
{
// Intentionally empty: all utilities are currently defined inline in
// os/utility.hpp. This translation unit exists so that out-of-line
// definitions can be added here later without touching the build.
} /* namespace os */
//===--- menu_component.hpp - -----------------------------------*- C++ -*-===//
//
// Head First Design Patterns
//
//
//===----------------------------------------------------------------------===//
///
/// \file
/// \brief Abstract base class of the Composite pattern: both Menu (composite)
/// and MenuItem (leaf) derive from it, so clients can treat them uniformly.
///
//===----------------------------------------------------------------------===//
#ifndef _HFDP_CPP_COMPOSITE_MENUS_MENU_COMPONENT_HPP_
#define _HFDP_CPP_COMPOSITE_MENUS_MENU_COMPONENT_HPP_

//https://google.github.io/styleguide/cppguide.html#Names_and_Order_of_Includes
//dir2 / foo2.h.
//C system files.
//C++ system files.
#include <memory>
#include <string>
//Other libraries' .h files.
//Your project's .h files.
// NOTE: the previous self-include of "menu_component.hpp" was removed; a
// header must never include itself (it was a no-op only thanks to the guard).

namespace headfirst {


class MenuComponent {

  // Disable copy constructor and assignment operator
  MenuComponent( const MenuComponent& ) = delete;
  void operator=( const MenuComponent& ) = delete;

public:
  MenuComponent();
  virtual ~MenuComponent();

  /// Menu (composite) only: takes ownership of a child component.
  virtual void Add( std::unique_ptr<MenuComponent> menu_component );
  //These two APIs are ignored : trivial and worthless
  //virtual void Remove( MenuComponent* menu_component );//Menu only
  //virtual MenuComponent* GetChild( int i ) const; //Menu only

  /// Required of every component.
  virtual std::string GetName() const = 0;
  virtual std::string GetDescription() const = 0;

  virtual double GetPrice() const;    //MenuItem only
  virtual bool IsVegetarian() const;  //MenuItem only

  virtual void Print() const = 0;
};

} //namespace headfirst

#endif
#ifndef BEYOND_CORE_UTILS_BYTE_SIZE_HPP #define BEYOND_CORE_UTILS_BYTE_SIZE_HPP #include "force_inline.hpp" #include <vector> namespace beyond { /** * @brief Gets the data size in bytes of a container */ template <typename T> [[nodiscard]] BEYOND_FORCE_INLINE constexpr auto byte_size(const std::vector<T>& v) noexcept -> std::uint32_t { return static_cast<std::uint32_t>(v.size() * sizeof(T)); } template <typename T, std::uint32_t N> [[nodiscard]] BEYOND_FORCE_INLINE constexpr auto byte_size(T (&/*arr*/)[N]) noexcept -> std::uint32_t { return N * sizeof(T); } } // namespace beyond #endif // BEYOND_CORE_UTILS_BYTE_SIZE_HPP
/**
 * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/delegate/npu/npu_subgraph.h"
#include <set>
#include <unordered_map>
#include <utility>
#include "include/errorcode.h"
#include "include/graph/operator.h"
#include "include/graph/graph.h"
#include "include/graph/op/const_defs.h"
#include "include/graph/model.h"
#include "include/hiai_ir_build.h"
#include "include/version.h"
#include "src/common/utils.h"
#include "src/delegate/npu/npu_converter_utils.h"

namespace mindspore {
// Op types that convert their own weight inputs; for these, weights are NOT
// wrapped into generic hiai::op::Const operators in BuildNPUInputOp.
static std::set<mindspore::schema::PrimitiveType> npu_specific_weight_nodes = {
  schema::PrimitiveType_Conv2DFusion,   schema::PrimitiveType_Conv2dTransposeFusion,
  schema::PrimitiveType_PadFusion,      schema::PrimitiveType_BatchNorm,
  schema::PrimitiveType_FullConnection, schema::PrimitiveType_InstanceNorm,
  schema::PrimitiveType_TileFusion};

NPUSubGraph::~NPUSubGraph() {
  subgraph_input_ops_.clear();
  subgraph_output_ops_.clear();
  out_tensor_sorted_.clear();
  // op_buffer_ owns the ge operators created in BuildNPUInputOp.
  for (auto op : op_buffer_) {
    delete op;
  }
  if (executor_ != nullptr) {
    delete executor_;
  }
  op_buffer_.clear();
}

// Replace the input tensor at |index| both in the subgraph's input list and
// in every boundary op that consumed the original tensor.
void NPUSubGraph::set_input(mindspore::MSTensor in_tensor, int index) {
  MS_ASSERT(index < inputs_.size());
  auto origin_tensor = inputs_[index];
  // only in_ops_ input tensors list used in execute function
  for (auto op : in_ops_) {
    for (size_t i = 0; i < op->inputs().size(); i++) {
      if (op->inputs()[i] == origin_tensor) {
        op->set_input(in_tensor, i);
      }
    }
  }
  this->inputs_[index] = in_tensor;
}

// Replace the output tensor at |index| in both the sorted execution-output
// list and the subgraph's output list.
void NPUSubGraph::set_output(mindspore::MSTensor out_tensor, int index) {
  MS_ASSERT(index < out_tensor_sorted_.size());
  auto origin_tensor = outputs_[index];
  for (size_t i = 0; i < out_tensor_sorted_.size(); i++) {
    if (out_tensor_sorted_[i] == origin_tensor) {
      out_tensor_sorted_[i] = out_tensor;
    }
  }
  outputs_[index] = out_tensor;
}

// Collect the boundary ops of the subgraph: in_ops_ consume a subgraph input
// tensor, out_ops_ produce a subgraph output tensor.
int NPUSubGraph::GetGraphInOutOps() {
  for (auto in_tensor : this->inputs()) {
    for (auto op : npu_ops_) {
      if (find(op->inputs().begin(), op->inputs().end(), in_tensor) != op->inputs().end() &&
          find(in_ops_.begin(), in_ops_.end(), op) == in_ops_.end()) {
        in_ops_.push_back(op);
      }
    }
  }
  if (in_ops_.empty()) {
    MS_LOG(ERROR) << "Can't find the input ops for npu sub graph.";
    return RET_ERROR;
  }

  for (auto out_tensor : this->outputs()) {
    for (auto op : npu_ops_) {
      if (find(op->outputs().begin(), op->outputs().end(), out_tensor) != op->outputs().end() &&
          find(out_ops_.begin(), out_ops_.end(), op) == out_ops_.end()) {
        out_ops_.push_back(op);
      }
    }
  }
  if (out_ops_.empty()) {
    MS_LOG(ERROR) << "Can't find the output ops for npu sub graph.";
    return RET_ERROR;
  }
  return RET_OK;
}

// Return all ops in this subgraph whose outputs feed |cur_op|.
std::vector<NPUOp *> NPUSubGraph::FindPreOps(NPUOp *cur_op) {
  std::vector<NPUOp *> in_ops;
  for (auto in_tensor : cur_op->inputs()) {
    for (auto op : npu_ops_) {
      if (find(op->outputs().begin(), op->outputs().end(), in_tensor) != op->outputs().end()) {
        in_ops.push_back(op);
      }
    }
  }
  return in_ops;
}

// Build the ge::Graph for this subgraph and compile it into an OM model
// buffer. Returns nullptr on any failure.
std::shared_ptr<domi::ModelBufferData> NPUSubGraph::BuildIRModel() {
  ge::Graph graph("NPUGraph");

  auto ret = BuildNPUInputOp();
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Build NPU input operator failed.";
    return nullptr;
  }
  ret = BuildNPUOutputOp();
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Build NPU output operator failed.";
    return nullptr;
  }
  graph.SetInputs(subgraph_input_ops_).SetOutputs(subgraph_output_ops_);
  ge::Model model(GetOMModelName(), mindspore::lite::Version());
  model.SetGraph(graph);
  domi::HiaiIrBuild ir_build;
  auto om_model_buff = std::make_shared<domi::ModelBufferData>();
  if (om_model_buff == nullptr) {
    MS_LOG(ERROR) << "OM model buffer is nullptr.";
    return nullptr;
  }
  if (!ir_build.CreateModelBuff(model, *om_model_buff)) {
    MS_LOG(ERROR) << "Create model buffer failed.";
    return nullptr;
  }
  if (!ir_build.BuildIRModel(model, *om_model_buff)) {
    MS_LOG(ERROR) << "Build IR model failed.";
    ir_build.ReleaseModelBuff(*om_model_buff);
    return nullptr;
  }
  return om_model_buff;
}

int NPUSubGraph::Execute() { return executor_->Run(inputs(), out_tensor_sorted_, in_ops_); }

// For every op, gather its ge input operators: subgraph inputs become
// hiai::op::Data, inner edges reuse the producing op's ge operator, and
// remaining weight tensors become hiai::op::Const (unless the op type
// handles its own weights, see npu_specific_weight_nodes).
int NPUSubGraph::BuildNPUInputOp() {
  int count = 0;
  subgraph_input_ops_.clear();
  op_buffer_.clear();
  for (auto op : this->npu_ops_) {
    std::vector<ge::Operator *> input_ops;
    std::unordered_map<int, std::pair<ge::Operator *, int>> index2_multi_out_index;
    for (int i = 0; i < static_cast<int>(op->inputs().size()); ++i) {
      auto in_tensor = op->inputs()[i];
      if (IsSubGraphInputTensor(in_tensor)) {
        auto tensor_name = "Input_" + std::to_string(count++) + '_' + op->name();
        hiai::op::Data *data = nullptr;
        data = ConverterToNPUData(in_tensor, tensor_name);
        if (data == nullptr) {
          MS_LOG(ERROR) << "Convert input tensor " << tensor_name << " to NPU data failed.";
          return RET_ERROR;
        }
        subgraph_input_ops_.push_back(*data);
        input_ops.push_back(data);
        op_buffer_.push_back(data);
        continue;
      }

      bool is_weight_tensor = true;
      auto pre_ops = FindPreOps(op);
      for (auto pre_op : pre_ops) {
        if (find(pre_op->outputs().begin(), pre_op->outputs().end(), in_tensor) != pre_op->outputs().end()) {
          // input comes from another npu op in this subgraph
          auto npu_op = pre_op->GetNPUOp();
          if (npu_op == nullptr) {
            MS_LOG(ERROR) << pre_op->name() << "'s npu operator is nullptr.";
            return RET_ERROR;
          }
          input_ops.push_back(npu_op);
          if (pre_op->outputs().size() != 1) {  // in_op has multi output, we record which output we want.
            int out_index =
              std::find(pre_op->outputs().begin(), pre_op->outputs().end(), in_tensor) - pre_op->outputs().begin();
            index2_multi_out_index[i] = {npu_op, out_index};
          }
          is_weight_tensor = false;
          break;
        }
      }

      // weight tensor
      if (is_weight_tensor) {
        if (npu_specific_weight_nodes.find(op->type()) == npu_specific_weight_nodes.end()) {
          // Bug fix: the previous code computed |name| and then built the
          // Const with a second name that bumped |count| again, leaving
          // |name| unused and skipping a counter value.
          auto name = op->name() + "_" + std::to_string(count++);
          auto weight_const = new (std::nothrow) hiai::op::Const(name);
          if (weight_const == nullptr) {
            MS_LOG(ERROR) << "New weight const failed.";
            return RET_ERROR;
          }
          auto weight_tensor = ConverterToNPUTensor(in_tensor);
          weight_const->set_attr_value(weight_tensor);
          input_ops.push_back(weight_const);
          op_buffer_.push_back(weight_const);
        }
      }
    }
    // set input to NPU
    int ret = op->SetNPUInputs(op->inputs(), op->outputs(), input_ops, index2_multi_out_index);
    if (ret != RET_OK) {
      MS_LOG(ERROR) << op->name() << " set npu inputs failed.";
      return RET_ERROR;
    }
  }
  return RET_OK;
}

bool NPUSubGraph::IsSubGraphInputTensor(mindspore::MSTensor input) {
  if (find(this->inputs().begin(), this->inputs().end(), input) != this->inputs().end()) {
    return true;
  }
  return false;
}

// Copy the ge operators of |ops| into subgraph_output_ops_.
int NPUSubGraph::GetNPUOperators(const vector<NPUOp *> &ops) {
  subgraph_output_ops_.reserve(ops.size());
  for (size_t i = 0; i < ops.size(); i++) {
    auto npu_op = ops[i]->GetNPUOp();
    if (npu_op == nullptr) {
      MS_LOG(ERROR) << "Get NPU operator for " << ops[i]->name() << " failed.";
      return RET_ERROR;
    }
    subgraph_output_ops_.push_back(*npu_op);
  }
  return RET_OK;
}

// Collect the ge output operators and build out_tensor_sorted_, the output
// tensors ordered by producing op rather than by the subgraph output list.
int NPUSubGraph::BuildNPUOutputOp() {
  subgraph_output_ops_.clear();
  auto ret = GetNPUOperators(out_ops_);
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Get NPU operators failed.";
    return RET_ERROR;
  }
  out_tensor_sorted_.resize(outputs().size());
  int i = 0;
  for (auto node : out_ops_) {
    for (auto tensor : node->outputs()) {
      if (std::find(outputs().begin(), outputs().end(), tensor) != outputs().end()) {
        this->out_tensor_sorted_[i++] = tensor;
      }
    }
  }
  if (subgraph_output_ops_.empty()) {
    MS_LOG(ERROR) << "NPU subgraph output op is empty.";
    return RET_ERROR;
  }
  return RET_OK;
}

std::string NPUSubGraph::GetOMModelName() { return this->name_ + ".om"; }

// Build the IR model, register it with the NPU manager and create an
// executor for it. Must run before Prepare()/Execute().
int NPUSubGraph::Init() {
  auto ret = GetGraphInOutOps();
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Get NPU subgraph input and output ops failed.";
    return RET_ERROR;
  }
  MS_ASSERT(npu_manager_ != nullptr);  // checked before first use (was asserted after)
  name_ = "kNpuSubGraph" + std::to_string(npu_manager_->SubGraphIndex());
  auto model_buffer_data = BuildIRModel();
  if (model_buffer_data == nullptr) {
    MS_LOG(ERROR) << "Build IR model failed.";
    return RET_ERROR;
  }

  npu_manager_->AddModel(model_buffer_data, GetOMModelName(), npu_manager_->GetFrequency());

  executor_ = new (std::nothrow) NPUExecutor(GetOMModelName(), npu_manager_);
  if (executor_ == nullptr) {
    MS_LOG(ERROR) << "Create NPUExecutor failed.";
    return RET_ERROR;
  }
  return RET_OK;
}

int NPUSubGraph::Prepare() {
  if (executor_->Prepare() != RET_OK) {
    MS_LOG(ERROR) << "NPU executor prepare failed.";
    return RET_ERROR;
  }
  return RET_OK;
}
}  // namespace mindspore
/* ************************************************************************ * Copyright (c) 2018-2020 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
 *
 * ************************************************************************ */
#pragma once
#ifndef ROCSPARSE_DOTI_HPP
#define ROCSPARSE_DOTI_HPP

#include "definitions.h"
#include "doti_device.h"
#include "handle.h"
#include "rocsparse.h"
#include "utility.h"

#include <hip/hip_runtime.h>

// Sparse dot product: result = sum_i x_val[i] * y[x_ind[i]].
// Two-pass reduction: part1 reduces into a per-block workspace, part2
// reduces the workspace to the final scalar.
template <typename T>
rocsparse_status rocsparse_doti_template(rocsparse_handle     handle,
                                         rocsparse_int        nnz,
                                         const T*             x_val,
                                         const rocsparse_int* x_ind,
                                         const T*             y,
                                         T*                   result,
                                         rocsparse_index_base idx_base)
{
    // Check for valid handle
    if(handle == nullptr)
    {
        return rocsparse_status_invalid_handle;
    }

    // Logging
    if(handle->pointer_mode == rocsparse_pointer_mode_host)
    {
        // NOTE(review): in host pointer mode *result is dereferenced here for
        // logging BEFORE the result == nullptr check further down — a null
        // result would be UB at this point. Verify callers guarantee non-null
        // or consider checking pointers before logging.
        log_trace(handle,
                  replaceX<T>("rocsparse_Xdoti"),
                  nnz,
                  (const void*&)x_val,
                  (const void*&)x_ind,
                  (const void*&)y,
                  *result,
                  idx_base);

        log_bench(handle, "./rocsparse-bench -f doti -r", replaceX<T>("X"), "--mtx <vector.mtx> ");
    }
    else
    {
        log_trace(handle,
                  replaceX<T>("rocsparse_Xdoti"),
                  nnz,
                  (const void*&)x_val,
                  (const void*&)x_ind,
                  (const void*&)y,
                  (const void*&)result,
                  idx_base);
    }

    // Check index base
    if(idx_base != rocsparse_index_base_zero && idx_base != rocsparse_index_base_one)
    {
        return rocsparse_status_invalid_value;
    }

    // Check size
    if(nnz < 0)
    {
        return rocsparse_status_invalid_size;
    }

    // Quick return if possible
    if(nnz == 0)
    {
        return rocsparse_status_success;
    }

    // Check pointer arguments
    if(x_val == nullptr)
    {
        return rocsparse_status_invalid_pointer;
    }
    else if(x_ind == nullptr)
    {
        return rocsparse_status_invalid_pointer;
    }
    else if(y == nullptr)
    {
        return rocsparse_status_invalid_pointer;
    }
    else if(result == nullptr)
    {
        return rocsparse_status_invalid_pointer;
    }

    // Stream
    hipStream_t stream = handle->stream;

#define DOTI_DIM 256
    // Get workspace from handle device buffer
    T* workspace = reinterpret_cast<T*>(handle->buffer);

    // Pass 1: per-block partial sums into workspace (DOTI_DIM blocks).
    hipLaunchKernelGGL((doti_kernel_part1<T, DOTI_DIM>),
                       dim3(DOTI_DIM),
                       dim3(DOTI_DIM),
                       0,
                       stream,
                       nnz,
                       x_val,
                       x_ind,
                       y,
                       workspace,
                       idx_base);

    if(handle->pointer_mode == rocsparse_pointer_mode_device)
    {
        // Device pointer mode: final reduction writes directly to result.
        hipLaunchKernelGGL((doti_kernel_part2<T, DOTI_DIM>),
                           dim3(1),
                           dim3(DOTI_DIM),
                           0,
                           stream,
                           DOTI_DIM,
                           workspace,
                           result);
    }
    else
    {
        // Host pointer mode: reduce into the workspace (null result pointer
        // presumably signals the kernel to write workspace[0] — confirm
        // against doti_device.h), then copy the scalar back to the host.
        hipLaunchKernelGGL((doti_kernel_part2<T, DOTI_DIM>),
                           dim3(1),
                           dim3(DOTI_DIM),
                           0,
                           stream,
                           DOTI_DIM,
                           workspace,
                           nullptr);

        RETURN_IF_HIP_ERROR(hipMemcpy(result, workspace, sizeof(T), hipMemcpyDeviceToHost));
    }
#undef DOTI_DIM
    return rocsparse_status_success;
}

#endif // ROCSPARSE_DOTI_HPP
#include <examplebase.h>
#include <model.h>
#include <material.h>
#include <buffer.h>
#include <random>
#include <ctime>

using namespace es;

// Bloom-with-HDR demo: renders glowing spheres into a floating-point FBO
// with two color attachments (scene + bright pass), ping-pong Gaussian-blurs
// the bright pass, then composites scene + blurred bloom with tone mapping.
class Example final : public ExampleBase
{
public:
	std::array<std::shared_ptr<Model>, 16> spheres;

	std::unique_ptr<Framebuffer> hdrFBO;
	std::shared_ptr<Texture2D> fragColorTexture;
	std::shared_ptr<Texture2D> brightColorTexture;
	std::unique_ptr<Renderbuffer> hdrRBO;

	std::array<std::unique_ptr<Framebuffer>, 2> pingpongFBO;
	std::array<std::shared_ptr<Texture2D>, 2> pingpongBuffer;
	std::array<std::unique_ptr<Renderbuffer>, 2> pingpongRBO;

	std::shared_ptr<Mesh> blurQuad;
	std::shared_ptr<Mesh> hdrQuad;

	Example()
	{
		title = "bloom with hdr";
		settings.vsync = true;
		defaultClearColor = glm::vec4(0.0f, 0.0f, 0.0f, 1.0f);

		modelsDirectory = getResourcesPath(ResourceType::Model);
		shadersDirectory = getResourcesPath(ResourceType::Shader) + "/26.bloom_with_hdr/";
	}

	~Example()
	{
		// Bug fix: the previous code swapped the array with a prvalue
		// temporary (std::array::swap takes an lvalue reference — ill-formed
		// standard C++). Releasing the shared_ptrs explicitly has the same
		// effect and is well-formed.
		spheres.fill(nullptr);
	}
public:
	virtual void prepare() override
	{
		ExampleBase::prepare();

		// setup camera
		mMainCamera->setPosition(glm::vec3(0.0f, 0.0f, 10.0f));

		// enable depth test
		glEnable(GL_DEPTH_TEST);

		// HDR framebuffer: scene color + bright-pass color, plus depth/stencil.
		hdrFBO = Framebuffer::create();
		fragColorTexture = Texture2D::createFromData(mWindowWidth, mWindowHeight, 1, 1, 1, GL_RGB16F, GL_RGB, GL_FLOAT);
		fragColorTexture->setMinFilter(GL_LINEAR);
		fragColorTexture->setMagFilter(GL_LINEAR);
		fragColorTexture->setWrapping(GL_CLAMP_TO_EDGE, GL_CLAMP_TO_EDGE, GL_CLAMP_TO_EDGE);

		brightColorTexture = Texture2D::createFromData(mWindowWidth, mWindowHeight, 1, 1, 1, GL_RGB16F, GL_RGB, GL_FLOAT);
		brightColorTexture->setMinFilter(GL_LINEAR);
		brightColorTexture->setMagFilter(GL_LINEAR);
		brightColorTexture->setWrapping(GL_CLAMP_TO_EDGE, GL_CLAMP_TO_EDGE, GL_CLAMP_TO_EDGE);

		hdrFBO->attachRenderTarget(0, fragColorTexture.get(), 0, 0);
		hdrFBO->attachRenderTarget(1, brightColorTexture.get(), 0, 0);

		hdrRBO = Renderbuffer::create(GL_DEPTH24_STENCIL8, mWindowWidth, mWindowHeight);
		hdrFBO->attachRenderBufferTarget(hdrRBO.get());

		// Two ping-pong framebuffers for the separable Gaussian blur passes.
		for (std::size_t i = 0; i < pingpongFBO.size(); i++)
		{
			pingpongFBO[i] = Framebuffer::create();
			pingpongBuffer[i] = Texture2D::createFromData(mWindowWidth, mWindowHeight, 1, 1, 1, GL_RGB16F, GL_RGB, GL_FLOAT);
			pingpongBuffer[i]->setMinFilter(GL_LINEAR);
			pingpongBuffer[i]->setMagFilter(GL_LINEAR);
			pingpongBuffer[i]->setWrapping(GL_CLAMP_TO_EDGE, GL_CLAMP_TO_EDGE, GL_CLAMP_TO_EDGE);
			pingpongFBO[i]->attachRenderTarget(0, pingpongBuffer[i].get(), 0, 0);
			pingpongRBO[i] = Renderbuffer::create(GL_DEPTH24_STENCIL8, mWindowWidth, mWindowHeight);
			pingpongFBO[i]->attachRenderBufferTarget(pingpongRBO[i].get());
		}

		// Fullscreen quad geometry shared by the blur and tone-mapping passes.
		std::vector<float> vertexAttribs = {
			// positions        // texture coordinates
			 1.0f,  1.0f, 0.0f, 1.0f, 1.0f,
			 1.0f, -1.0f, 0.0f, 1.0f, 0.0f,
			-1.0f, -1.0f, 0.0f, 0.0f, 0.0f,
			-1.0f,  1.0f, 0.0f, 0.0f, 1.0f
		};

		std::vector<uint32_t> indices = {
			0, 1, 3,
			1, 2, 3
		};

		std::vector<Vertex> vertices = {};
		for (uint32_t i = 0; i < static_cast<uint32_t>(vertexAttribs.size() / 5); i++)
		{
			Vertex vertex;
			vertex.vPosition = glm::vec3(vertexAttribs[i * 5], vertexAttribs[i * 5 + 1], vertexAttribs[i * 5 + 2]);
			vertex.vTexcoord = glm::vec2(vertexAttribs[i * 5 + 3], vertexAttribs[i * 5 + 4]);
			vertices.push_back(vertex);
		}

		std::shared_ptr<Material> blurMat = Material::createFromData("blur_mat",
			{
				shadersDirectory + "blur.vert",
				shadersDirectory + "blur.frag"
			},
			{
				{ "image", brightColorTexture }
			}
		);
		std::shared_ptr<Material> hdrMat = Material::createFromData("hdr_mat",
			{
				shadersDirectory + "hdr.vert",
				shadersDirectory + "hdr.frag"
			},
			{
				{ "scene", fragColorTexture },
				{ "bloomBlur", pingpongBuffer[1] }
			}
		);

		// Bug fix: the debug names were swapped — blurQuad was created as
		// "hdr_quad" and hdrQuad as "blur_quad".
		blurQuad = Mesh::createWithData("blur_quad", vertices, indices);
		blurQuad->setDrawType(Mesh::DrawType::ELEMENTS);
		blurQuad->setMaterial(blurMat);
		blurQuad->setUniform("blurScale", 2.0f);
		blurQuad->setUniform("blurStrength", 1.0f);

		hdrQuad = Mesh::createWithData("hdr_quad", vertices, indices);
		hdrQuad->setDrawType(Mesh::DrawType::ELEMENTS);
		hdrQuad->setMaterial(hdrMat);
		hdrQuad->setUniform("exposure", 1.0f);

		// Spheres arranged on a circle, each with a random HDR-ish color.
		std::default_random_engine e(time(0));
		std::uniform_real_distribution<double> u(0.3f, 2.0f);

		std::shared_ptr<Model> sphereTemplate = Model::createFromFile("sphere_template", modelsDirectory + "/sphere/sphere.obj",
			{
				shadersDirectory + "color_pass.vert",
				shadersDirectory + "color_pass.frag"
			},
			true
		);

		for (size_t i = 0; i < spheres.size(); i++)
		{
			std::shared_ptr<Model> sphere = Model::clone("sphere_" + std::to_string(i), sphereTemplate.get());
			glm::vec3 randomColor = glm::vec3(u(e), u(e), u(e));
			sphere->setUniform("randomColor", randomColor);
			glm::vec3 pos = glm::vec3(sin(glm::radians(i * (360.0f / spheres.size()))), cos(glm::radians(i * (360.0f / spheres.size()))), 0.0f) * 3.5f;
			sphere->setPosition(pos);
			sphere->setScale(glm::vec3(0.02f));
			spheres[i] = sphere;
		}
	}

	virtual void render(float deltaTime) override
	{
		// Pass 1: render the scene into the HDR FBO (scene + bright pass).
		hdrFBO->bind();
		glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
		glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
		for (size_t i = 0; i < spheres.size(); i++)
		{
			spheres[i]->render();
		}
		hdrFBO->unbind();

		glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
		glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);

		// Pass 2: ping-pong blur the bright-pass texture, alternating the
		// blur direction each iteration.
		bool horizontal = true;
		bool firstIteration = true;
		uint8_t amount = 10;
		for (uint8_t i = 0; i < amount; i++)
		{
			pingpongFBO[horizontal]->bind();
			glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
			glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
			blurQuad->setUniform("horizontal", horizontal);
			blurQuad->setTexture("image", firstIteration ? brightColorTexture : pingpongBuffer[!horizontal]);
			blurQuad->render();
			horizontal = !horizontal;
			if (firstIteration)
				firstIteration = false;
		}
		glBindFramebuffer(GL_FRAMEBUFFER, 0);

		// Pass 3: composite scene + bloom with tone mapping to the backbuffer.
		glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
		glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
		hdrQuad->render();
	}

	virtual void windowResized() override
	{
		ExampleBase::windowResized();

		// Keep all render targets the same size as the window.
		fragColorTexture->resize(0, mWindowWidth, mWindowHeight);
		brightColorTexture->resize(0, mWindowWidth, mWindowHeight);
		hdrRBO->resize(mWindowWidth, mWindowHeight);

		for (std::size_t i = 0; i < pingpongBuffer.size(); i++)
		{
			pingpongBuffer[i]->resize(0, mWindowWidth, mWindowHeight);
		}
		for (std::size_t i = 0; i < pingpongRBO.size(); i++)
		{
			pingpongRBO[i]->resize(mWindowWidth, mWindowHeight);
		}
	}
};

Example* example;
int APIENTRY WinMain(HINSTANCE hInstance, HINSTANCE, LPSTR, int)
{
	example = new Example();
	example->setupValidation();
	if (!example->setupSDL() ||
		!example->loadGLESFunctions() ||
		!example->setupImGui())
	{
		return 0;
	}
	example->prepare();
	example->renderLoop();
	delete(example);
	return 0;
}
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "components/metrics/leak_detector/call_stack_manager.h"

#include <stdint.h>

#include <algorithm>

#include "base/macros.h"
#include "base/memory/scoped_ptr.h"
#include "components/metrics/leak_detector/custom_allocator.h"
#include "testing/gtest/include/gtest/gtest.h"

namespace metrics {
namespace leak_detector {

namespace {

// Some test call stacks. The addresses are 64-bit but they should automatically
// be truncated to 32 bits on a 32-bit machine.
const void* kRawStack0[] = {
    reinterpret_cast<const void*>(0x8899aabbccddeeff),
    reinterpret_cast<const void*>(0x0000112233445566),
    reinterpret_cast<const void*>(0x5566778899aabbcc),
    reinterpret_cast<const void*>(0x9988776655443322),
};
// This is similar to kRawStack0, differing only in one address by 1. It should
// still produce a distinct CallStack object and hash.
const void* kRawStack1[] = {
    kRawStack0[0],
    kRawStack0[1],
    reinterpret_cast<const void*>(reinterpret_cast<uintptr_t>(kRawStack0[2]) + 1),
    kRawStack0[3],
};
// A shorter, entirely different stack.
const void* kRawStack2[] = {
    reinterpret_cast<const void*>(0x900df00dcab58888),
    reinterpret_cast<const void*>(0x00001337cafedeed),
    reinterpret_cast<const void*>(0x0000deafbabe1234),
};
// A deeper stack (8 frames) used for the reduced-depth hashing test.
const void* kRawStack3[] = {
    reinterpret_cast<const void*>(0x0000000012345678),
    reinterpret_cast<const void*>(0x00000000abcdef01),
    reinterpret_cast<const void*>(0x00000000fdecab98),
    reinterpret_cast<const void*>(0x0000deadbeef0001),
    reinterpret_cast<const void*>(0x0000900ddeed0002),
    reinterpret_cast<const void*>(0x0000f00dcafe0003),
    reinterpret_cast<const void*>(0x0000f00d900d0004),
    reinterpret_cast<const void*>(0xdeedcafebabe0005),
};

// Creates a copy of a call stack as a scoped_ptr to a raw stack. The depth is
// the same as the original stack, but it is not stored in the result.
scoped_ptr<const void* []> CopyStack(const CallStack* stack) {
  scoped_ptr<const void* []> stack_copy(new const void*[stack->depth]);
  std::copy(stack->stack, stack->stack + stack->depth, stack_copy.get());
  return stack_copy;
}

}  // namespace

// Fixture: CallStackManager allocates through CustomAllocator, which must be
// initialized before and shut down after each test.
class CallStackManagerTest : public ::testing::Test {
 public:
  CallStackManagerTest() {}

  void SetUp() override { CustomAllocator::Initialize(); }

  void TearDown() override { EXPECT_TRUE(CustomAllocator::Shutdown()); }

 private:
  DISALLOW_COPY_AND_ASSIGN(CallStackManagerTest);
};

TEST_F(CallStackManagerTest, NewStacks) {
  CallStackManager manager;
  EXPECT_EQ(0U, manager.size());

  // Request some new stacks and make sure their creation is reflected in the
  // size of |manager|.
  const CallStack* stack0 =
      manager.GetCallStack(arraysize(kRawStack0), kRawStack0);
  EXPECT_EQ(arraysize(kRawStack0), stack0->depth);
  EXPECT_EQ(1U, manager.size());

  const CallStack* stack1 =
      manager.GetCallStack(arraysize(kRawStack1), kRawStack1);
  EXPECT_EQ(arraysize(kRawStack1), stack1->depth);
  EXPECT_EQ(2U, manager.size());

  const CallStack* stack2 =
      manager.GetCallStack(arraysize(kRawStack2), kRawStack2);
  EXPECT_EQ(arraysize(kRawStack2), stack2->depth);
  EXPECT_EQ(3U, manager.size());

  const CallStack* stack3 =
      manager.GetCallStack(arraysize(kRawStack3), kRawStack3);
  EXPECT_EQ(arraysize(kRawStack3), stack3->depth);
  EXPECT_EQ(4U, manager.size());

  // Call stack objects should be unique.
  EXPECT_NE(stack0, stack1);
  EXPECT_NE(stack0, stack2);
  EXPECT_NE(stack0, stack3);
  EXPECT_NE(stack1, stack2);
  EXPECT_NE(stack1, stack3);
  EXPECT_NE(stack2, stack3);
}

TEST_F(CallStackManagerTest, Hashes) {
  CallStackManager manager;
  const CallStack* stack0 =
      manager.GetCallStack(arraysize(kRawStack0), kRawStack0);
  const CallStack* stack1 =
      manager.GetCallStack(arraysize(kRawStack1), kRawStack1);
  const CallStack* stack2 =
      manager.GetCallStack(arraysize(kRawStack2), kRawStack2);
  const CallStack* stack3 =
      manager.GetCallStack(arraysize(kRawStack3), kRawStack3);

  // Hash values should be unique. This test is not designed to make sure the
  // hash function is generating unique hashes, but that CallStackManager is
  // properly storing the hashes in CallStack structs.
  EXPECT_NE(stack0->hash, stack1->hash);
  EXPECT_NE(stack0->hash, stack2->hash);
  EXPECT_NE(stack0->hash, stack3->hash);
  EXPECT_NE(stack1->hash, stack2->hash);
  EXPECT_NE(stack1->hash, stack3->hash);
  EXPECT_NE(stack2->hash, stack3->hash);
}

TEST_F(CallStackManagerTest, MultipleManagersHashes) {
  CallStackManager manager1;
  const CallStack* stack10 =
      manager1.GetCallStack(arraysize(kRawStack0), kRawStack0);
  const CallStack* stack11 =
      manager1.GetCallStack(arraysize(kRawStack1), kRawStack1);
  const CallStack* stack12 =
      manager1.GetCallStack(arraysize(kRawStack2), kRawStack2);
  const CallStack* stack13 =
      manager1.GetCallStack(arraysize(kRawStack3), kRawStack3);

  CallStackManager manager2;
  const CallStack* stack20 =
      manager2.GetCallStack(arraysize(kRawStack0), kRawStack0);
  const CallStack* stack21 =
      manager2.GetCallStack(arraysize(kRawStack1), kRawStack1);
  const CallStack* stack22 =
      manager2.GetCallStack(arraysize(kRawStack2), kRawStack2);
  const CallStack* stack23 =
      manager2.GetCallStack(arraysize(kRawStack3), kRawStack3);

  // Different CallStackManagers should still generate the same hashes.
  EXPECT_EQ(stack10->hash, stack20->hash);
  EXPECT_EQ(stack11->hash, stack21->hash);
  EXPECT_EQ(stack12->hash, stack22->hash);
  EXPECT_EQ(stack13->hash, stack23->hash);
}

TEST_F(CallStackManagerTest, HashWithReducedDepth) {
  CallStackManager manager;
  const CallStack* stack =
      manager.GetCallStack(arraysize(kRawStack3), kRawStack3);

  // Hash function should only operate on the first |CallStack::depth| elements
  // of CallStack::stack. To test this, reduce the depth value of one of the
  // stacks and make sure the hash changes.
  EXPECT_NE(stack->hash,
            manager.GetCallStack(stack->depth - 1, stack->stack)->hash);
  EXPECT_NE(stack->hash,
            manager.GetCallStack(stack->depth - 2, stack->stack)->hash);
  EXPECT_NE(stack->hash,
            manager.GetCallStack(stack->depth - 3, stack->stack)->hash);
  EXPECT_NE(stack->hash,
            manager.GetCallStack(stack->depth - 4, stack->stack)->hash);

  // Also try subsets of the stack that don't start from the beginning.
  EXPECT_NE(stack->hash,
            manager.GetCallStack(stack->depth - 1, stack->stack + 1)->hash);
  EXPECT_NE(stack->hash,
            manager.GetCallStack(stack->depth - 2, stack->stack + 2)->hash);
  EXPECT_NE(stack->hash,
            manager.GetCallStack(stack->depth - 3, stack->stack + 3)->hash);
  EXPECT_NE(stack->hash,
            manager.GetCallStack(stack->depth - 4, stack->stack + 4)->hash);
}

TEST_F(CallStackManagerTest, DuplicateStacks) {
  CallStackManager manager;
  EXPECT_EQ(0U, manager.size());

  // Calling manager.GetCallStack() multiple times with the same raw stack
  // arguments will not result in creation of new call stack objects after the
  // first call. Instead, the previously created object will be returned, and
  // the size of |manager| will remain unchanged.
  //
  // Thus a call to GetCallStack() will always return the same result, given the
  // same inputs.

  // Add stack0.
  const CallStack* stack0 =
      manager.GetCallStack(arraysize(kRawStack0), kRawStack0);

  scoped_ptr<const void* []> rawstack0_duplicate0 = CopyStack(stack0);
  const CallStack* stack0_duplicate0 =
      manager.GetCallStack(arraysize(kRawStack0), rawstack0_duplicate0.get());
  EXPECT_EQ(1U, manager.size());
  EXPECT_EQ(stack0, stack0_duplicate0);

  // Add stack1.
  const CallStack* stack1 =
      manager.GetCallStack(arraysize(kRawStack1), kRawStack1);
  EXPECT_EQ(2U, manager.size());

  scoped_ptr<const void* []> rawstack0_duplicate1 = CopyStack(stack0);
  const CallStack* stack0_duplicate1 =
      manager.GetCallStack(arraysize(kRawStack0), rawstack0_duplicate1.get());
  EXPECT_EQ(2U, manager.size());
  EXPECT_EQ(stack0, stack0_duplicate1);

  scoped_ptr<const void* []> rawstack1_duplicate0 = CopyStack(stack1);
  const CallStack* stack1_duplicate0 =
      manager.GetCallStack(arraysize(kRawStack1), rawstack1_duplicate0.get());
  EXPECT_EQ(2U, manager.size());
  EXPECT_EQ(stack1, stack1_duplicate0);

  // Add stack2 and stack3.
  const CallStack* stack2 =
      manager.GetCallStack(arraysize(kRawStack2), kRawStack2);
  const CallStack* stack3 =
      manager.GetCallStack(arraysize(kRawStack3), kRawStack3);
  EXPECT_EQ(4U, manager.size());

  scoped_ptr<const void* []> rawstack1_duplicate1 = CopyStack(stack1);
  const CallStack* stack1_duplicate1 =
      manager.GetCallStack(arraysize(kRawStack1), rawstack1_duplicate1.get());
  EXPECT_EQ(4U, manager.size());
  EXPECT_EQ(stack1, stack1_duplicate1);

  scoped_ptr<const void* []> rawstack0_duplicate2 = CopyStack(stack0);
  const CallStack* stack0_duplicate2 =
      manager.GetCallStack(arraysize(kRawStack0), rawstack0_duplicate2.get());
  EXPECT_EQ(4U, manager.size());
  EXPECT_EQ(stack0, stack0_duplicate2);

  scoped_ptr<const void* []> rawstack3_duplicate0 = CopyStack(stack3);
  const CallStack* stack3_duplicate0 =
      manager.GetCallStack(arraysize(kRawStack3), rawstack3_duplicate0.get());
  EXPECT_EQ(4U, manager.size());
  EXPECT_EQ(stack3, stack3_duplicate0);

  scoped_ptr<const void* []> rawstack2_duplicate0 = CopyStack(stack2);
  const CallStack* stack2_duplicate0 =
      manager.GetCallStack(arraysize(kRawStack2), rawstack2_duplicate0.get());
  EXPECT_EQ(4U, manager.size());
  EXPECT_EQ(stack2, stack2_duplicate0);
}

}  // namespace leak_detector
}  // namespace metrics
// Copyright (C) 2020 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #include "layer_transformation.hpp" #include <string> #include <sstream> #include <memory> #include <gtest/gtest.h> #include <transformations/utils/utils.hpp> #include <transformations/init_node_info.hpp> #include <transformations/low_precision/transformer.hpp> #include <transformations/low_precision/concat.hpp> #include <transformations/low_precision/concat_multi_channels.hpp> #include "common_test_utils/ngraph_test_utils.hpp" #include "ngraph_functions/low_precision_transformations/concat_function.hpp" #include "ngraph_functions/low_precision_transformations/common/fake_quantize_on_data.hpp" #include "simple_low_precision_transformer.hpp" using namespace testing; using namespace ngraph; using namespace ngraph::pass; namespace { class ConcatTransformationActualValues { public: ngraph::builder::subgraph::FakeQuantizeOnData fakeQuantize1; ngraph::builder::subgraph::FakeQuantizeOnData fakeQuantize2; ngraph::builder::subgraph::FakeQuantizeOnData fakeQuantize3; }; inline std::ostream& operator<<(std::ostream& out, const ConcatTransformationActualValues& values) { return out << "_" << values.fakeQuantize1 << "_" << values.fakeQuantize2 << "_" << values.fakeQuantize3; } class ConcatTransformationResultValues { public: ngraph::builder::subgraph::FakeQuantizeOnData fakeQuantize1; ngraph::builder::subgraph::FakeQuantizeOnData fakeQuantize2; ngraph::builder::subgraph::FakeQuantizeOnData fakeQuantize3; ngraph::builder::subgraph::DequantizationOperations dequantizationOperations1; ngraph::builder::subgraph::DequantizationOperations dequantizationOperations2; }; inline std::ostream& operator<<(std::ostream& out, const ConcatTransformationResultValues& values) { return out << "_" << values.fakeQuantize1 << "_" << values.fakeQuantize2 << "_" << values.fakeQuantize3 << "_" << values.dequantizationOperations1 << "_" << values.dequantizationOperations2; } class ConcatTransformationTestValues { public: 
ngraph::pass::low_precision::LayerTransformation::Params params; bool multiChannels; ConcatTransformationActualValues actual; ConcatTransformationResultValues result; }; inline std::ostream& operator<<(std::ostream& out, const ConcatTransformationTestValues& values) { return out << "_" << values.multiChannels << "_" << values.actual << "_" << values.result; } typedef std::tuple < ngraph::element::Type, bool, ngraph::Shape, ConcatTransformationTestValues > ConcatTransformationParams; class ConcatWithNeighborsTransformation : public LayerTransformation, public testing::WithParamInterface<ConcatTransformationParams> { public: void SetUp() override { const ngraph::element::Type precision = std::get<0>(GetParam()); const bool updatePrecisions = std::get<1>(GetParam()); const ngraph::Shape shape = std::get<2>(GetParam()); ConcatTransformationTestValues testValues = std::get<3>(GetParam()); testValues.params.updatePrecisions = updatePrecisions; if (!updatePrecisions) { testValues.result.fakeQuantize1.outputPrecision = testValues.actual.fakeQuantize1.outputPrecision; testValues.result.fakeQuantize2.outputPrecision = testValues.actual.fakeQuantize2.outputPrecision; testValues.result.fakeQuantize3.outputPrecision = testValues.actual.fakeQuantize3.outputPrecision; } actualFunction = ngraph::builder::subgraph::ConcatFunction::getOriginalWithNeighbors( precision, shape, testValues.actual.fakeQuantize1, testValues.actual.fakeQuantize2, testValues.actual.fakeQuantize3); SimpleLowPrecisionTransformer transform; if (testValues.multiChannels) { transform.add<ngraph::pass::low_precision::ConcatMultiChannelsTransformation, ngraph::opset1::Concat>(testValues.params); } else { transform.add<ngraph::pass::low_precision::ConcatTransformation, ngraph::opset1::Concat>(testValues.params); } transform.transform(actualFunction); referenceFunction = ngraph::builder::subgraph::ConcatFunction::getReferenceWithNeighbors( precision, shape, testValues.result.fakeQuantize1, 
testValues.result.fakeQuantize2, testValues.result.fakeQuantize3, testValues.result.dequantizationOperations1, testValues.result.dequantizationOperations2); } static std::string getTestCaseName(testing::TestParamInfo<ConcatTransformationParams> obj) { const ngraph::element::Type precision = std::get<0>(obj.param); const bool updatePrecision = std::get<1>(obj.param); const ngraph::Shape shape = std::get<2>(obj.param); const ConcatTransformationTestValues testValues = std::get<3>(obj.param); std::ostringstream result; result << LayerTransformation::getTestCaseNameByParams(precision, shape, testValues.params) << "_" << (testValues.multiChannels ? "multiChannels_" : "notMultiChannels_") << (updatePrecision ? "updatePrecision_" : "notUpdatePrecision_") << testValues.actual << "_" << testValues.result << "_"; return result.str(); } }; TEST_P(ConcatWithNeighborsTransformation, CompareFunctions) { actualFunction->validate_nodes_and_infer_types(); auto res = compare_functions(referenceFunction, actualFunction, true, true, true); ASSERT_TRUE(res.first) << res.second; } const std::vector<ngraph::element::Type> precisions = { ngraph::element::f32, // ngraph::element::f16 }; const std::vector<bool> updatePrecisions = { true, false }; const std::vector<ConcatTransformationTestValues> testValues = { // U8: concat { LayerTransformation::createParamsU8I8(), false, { { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f / 2.f} }, { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f / 3.f} } }, { { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {255.f}, ngraph::element::u8 }, { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {128.f}, ngraph::element::u8 }, { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {85.f}, ngraph::element::u8 }, { ngraph::element::f32, {}, { 0.01f } }, { ngraph::element::f32, {}, { 0.01f } } } }, // U8: concat multi channels { LayerTransformation::createParamsU8I8(), true, { { 256ul, 
ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f / 2.f} }, { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f / 3.f} } }, { { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {255.f}, ngraph::element::u8 }, { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {255.f}, ngraph::element::u8 }, { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {255.f}, ngraph::element::u8 }, { ngraph::element::f32, {}, {{ 0.01f, 0.01f, 0.01f, 0.005f, 0.005f, 0.005f }} }, { ngraph::element::f32, {}, {{ 0.005f, 0.005f, 0.005f, 0.00333f, 0.00333f, 0.00333f }} } } }, // U8: concat multi channels with subtract { LayerTransformation::createParamsU8I8(), true, { { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, { 256ul, ngraph::Shape({}), {1.275f}, {2.55f}, {1.275f}, {2.55f} }, { 256ul, ngraph::Shape({}), {1.275f}, {2.55f}, {1.275f}, {2.55f} } }, { { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {255.f}, ngraph::element::u8 }, { 256ul, ngraph::Shape({}), {1.275f}, {2.55f}, {0.f}, {255.f}, ngraph::element::u8 }, { 256ul, ngraph::Shape({}), {1.275f}, {2.55f}, {0.f}, {255.f}, ngraph::element::u8 }, { ngraph::element::f32, {{ 0.f, 0.f, 0.f, -255.f, -255.f, -255.f }}, {{ 0.01f, 0.01f, 0.01f, 0.005f, 0.005f, 0.005f }} }, { ngraph::element::f32, { -255.f }, { 0.005f } } } }, // I8: concat { LayerTransformation::createParamsI8I8(), false, { { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, { 256ul, ngraph::Shape({}), {-1.28f / 2.f}, {1.27f / 2.f}, {-1.28f / 2.f}, {1.27f / 2.f} }, { 256ul, ngraph::Shape({}), {-1.28f / 3.f}, {1.27f / 3.f}, {-1.28f / 3.f}, {1.27f / 3.f} } }, { { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-128.f}, {127.f}, ngraph::element::i8 }, { 256ul, ngraph::Shape({}), {-1.28f / 2.f}, {1.27f / 2.f}, {-64}, {64.f}, ngraph::element::i8 }, { 256ul, ngraph::Shape({}), {-1.28f / 3.f}, {1.27f / 3.f}, {-43}, {42.f}, ngraph::element::i8 }, { ngraph::element::f32, {}, { 0.01f } }, { 
ngraph::element::f32, {}, { 0.01f } } } }, // I8: concat multi channels { LayerTransformation::createParamsI8I8(), true, { { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, { 256ul, ngraph::Shape({}), {-1.28f / 2.f}, {1.27f / 2.f}, {-1.28f / 2.f}, {1.27f / 2.f} }, { 256ul, ngraph::Shape({}), {-1.28f / 3.f}, {1.27f / 3.f}, {-1.28f / 3.f}, {1.27f / 3.f} } }, { { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-128.f}, {127.f}, ngraph::element::i8 }, { 256ul, ngraph::Shape({}), {-1.28f / 2.f}, {1.27f / 2.f}, {-128.f}, {127.f}, ngraph::element::i8 }, { 256ul, ngraph::Shape({}), {-1.28f / 3.f}, {1.27f / 3.f}, {-128.f}, {127.f}, ngraph::element::i8 }, { ngraph::element::f32, {}, {{ 0.01f, 0.01f, 0.01f, 0.005f, 0.005f, 0.005f }} }, { ngraph::element::f32, {}, {{ 0.005f, 0.005f, 0.005f, 0.00333f, 0.00333f, 0.00333f }} } } }, // mixed: U8 + I8: concat multi channels { LayerTransformation::createParamsU8I8(), true, { { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} }, { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {-1.28f}, {1.27f} } }, { { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {255.f}, ngraph::element::u8 }, { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {0.f}, {255.f}, ngraph::element::u8 }, { 256ul, ngraph::Shape({}), {-1.28f}, {1.27f}, {0.f}, {255.f}, ngraph::element::u8 }, { ngraph::element::f32, {{ 0.f, 0.f, 0.f, 128.f, 128.f, 128.f }}, { 0.01f } }, { ngraph::element::f32, { 128.f }, { 0.01f } } } }, }; const std::vector<ngraph::Shape> shapes = { { 1, 3, 9, 9 }, { 4, 3, 9, 9 } }; INSTANTIATE_TEST_CASE_P( LPT, ConcatWithNeighborsTransformation, ::testing::Combine( ::testing::ValuesIn(precisions), ::testing::ValuesIn(updatePrecisions), ::testing::ValuesIn(shapes), ::testing::ValuesIn(testValues)), ConcatWithNeighborsTransformation::getTestCaseName); } // namespace
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/printing/print_job.h" #include "base/bind.h" #include "base/bind_helpers.h" #include "base/message_loop.h" #include "base/threading/thread_restrictions.h" #include "base/threading/worker_pool.h" #include "base/timer.h" #include "chrome/browser/printing/print_job_worker.h" #include "chrome/common/chrome_notification_types.h" #include "content/public/browser/notification_service.h" #include "printing/printed_document.h" #include "printing/printed_page.h" using base::TimeDelta; namespace { // Helper function to ensure |owner| is valid until at least |callback| returns. void HoldRefCallback(const scoped_refptr<printing::PrintJobWorkerOwner>& owner, const base::Closure& callback) { callback.Run(); } } // namespace namespace printing { PrintJob::PrintJob() : ui_message_loop_(base::MessageLoop::current()), source_(NULL), worker_(), settings_(), is_job_pending_(false), is_canceling_(false), is_stopping_(false), is_stopped_(false), quit_factory_(this), weak_ptr_factory_(this) { DCHECK(ui_message_loop_); // This is normally a UI message loop, but in unit tests, the message loop is // of the 'default' type. DCHECK(ui_message_loop_->type() == base::MessageLoop::TYPE_UI || ui_message_loop_->type() == base::MessageLoop::TYPE_DEFAULT); ui_message_loop_->AddDestructionObserver(this); } PrintJob::~PrintJob() { ui_message_loop_->RemoveDestructionObserver(this); // The job should be finished (or at least canceled) when it is destroyed. 
DCHECK(!is_job_pending_); DCHECK(!is_canceling_); if (worker_.get()) DCHECK(worker_->message_loop() == NULL); DCHECK_EQ(ui_message_loop_, base::MessageLoop::current()); } void PrintJob::Initialize(PrintJobWorkerOwner* job, PrintedPagesSource* source, int page_count) { DCHECK(!source_); DCHECK(!worker_.get()); DCHECK(!is_job_pending_); DCHECK(!is_canceling_); DCHECK(!document_.get()); source_ = source; worker_.reset(job->DetachWorker(this)); settings_ = job->settings(); PrintedDocument* new_doc = new PrintedDocument(settings_, source_, job->cookie()); new_doc->set_page_count(page_count); UpdatePrintedDocument(new_doc); // Don't forget to register to our own messages. registrar_.Add(this, chrome::NOTIFICATION_PRINT_JOB_EVENT, content::Source<PrintJob>(this)); } void PrintJob::Observe(int type, const content::NotificationSource& source, const content::NotificationDetails& details) { DCHECK_EQ(ui_message_loop_, base::MessageLoop::current()); switch (type) { case chrome::NOTIFICATION_PRINT_JOB_EVENT: { OnNotifyPrintJobEvent(*content::Details<JobEventDetails>(details).ptr()); break; } default: { break; } } } void PrintJob::GetSettingsDone(const PrintSettings& new_settings, PrintingContext::Result result) { NOTREACHED(); } PrintJobWorker* PrintJob::DetachWorker(PrintJobWorkerOwner* new_owner) { NOTREACHED(); return NULL; } base::MessageLoop* PrintJob::message_loop() { return ui_message_loop_; } const PrintSettings& PrintJob::settings() const { return settings_; } int PrintJob::cookie() const { if (!document_.get()) // Always use an invalid cookie in this case. return 0; return document_->cookie(); } void PrintJob::WillDestroyCurrentMessageLoop() { NOTREACHED(); } void PrintJob::StartPrinting() { DCHECK_EQ(ui_message_loop_, base::MessageLoop::current()); DCHECK(worker_->message_loop()); DCHECK(!is_job_pending_); if (!worker_->message_loop() || is_job_pending_) return; // Real work is done in PrintJobWorker::StartPrinting(). 
worker_->message_loop()->PostTask( FROM_HERE, base::Bind(&HoldRefCallback, make_scoped_refptr(this), base::Bind(&PrintJobWorker::StartPrinting, base::Unretained(worker_.get()), document_))); // Set the flag right now. is_job_pending_ = true; // Tell everyone! scoped_refptr<JobEventDetails> details( new JobEventDetails(JobEventDetails::NEW_DOC, document_.get(), NULL)); content::NotificationService::current()->Notify( chrome::NOTIFICATION_PRINT_JOB_EVENT, content::Source<PrintJob>(this), content::Details<JobEventDetails>(details.get())); } void PrintJob::Stop() { DCHECK_EQ(ui_message_loop_, base::MessageLoop::current()); if (quit_factory_.HasWeakPtrs()) { // In case we're running a nested message loop to wait for a job to finish, // and we finished before the timeout, quit the nested loop right away. Quit(); quit_factory_.InvalidateWeakPtrs(); } // Be sure to live long enough. scoped_refptr<PrintJob> handle(this); base::MessageLoop* worker_loop = worker_->message_loop(); if (worker_loop) { ControlledWorkerShutdown(); is_job_pending_ = false; registrar_.Remove(this, chrome::NOTIFICATION_PRINT_JOB_EVENT, content::Source<PrintJob>(this)); } // Flush the cached document. UpdatePrintedDocument(NULL); } void PrintJob::Cancel() { if (is_canceling_) return; is_canceling_ = true; // Be sure to live long enough. scoped_refptr<PrintJob> handle(this); DCHECK_EQ(ui_message_loop_, base::MessageLoop::current()); base::MessageLoop* worker_loop = worker_.get() ? worker_->message_loop() : NULL; if (worker_loop) { // Call this right now so it renders the context invalid. Do not use // InvokeLater since it would take too much time. worker_->Cancel(); } // Make sure a Cancel() is broadcast. 
scoped_refptr<JobEventDetails> details( new JobEventDetails(JobEventDetails::FAILED, NULL, NULL)); content::NotificationService::current()->Notify( chrome::NOTIFICATION_PRINT_JOB_EVENT, content::Source<PrintJob>(this), content::Details<JobEventDetails>(details.get())); Stop(); is_canceling_ = false; } bool PrintJob::FlushJob(base::TimeDelta timeout) { // Make sure the object outlive this message loop. scoped_refptr<PrintJob> handle(this); base::MessageLoop::current()->PostDelayedTask(FROM_HERE, base::Bind(&PrintJob::Quit, quit_factory_.GetWeakPtr()), timeout); base::MessageLoop::ScopedNestableTaskAllower allow( base::MessageLoop::current()); base::MessageLoop::current()->Run(); return true; } void PrintJob::DisconnectSource() { source_ = NULL; if (document_.get()) document_->DisconnectSource(); } bool PrintJob::is_job_pending() const { return is_job_pending_; } bool PrintJob::is_stopping() const { return is_stopping_; } bool PrintJob::is_stopped() const { return is_stopped_; } PrintedDocument* PrintJob::document() const { return document_.get(); } void PrintJob::UpdatePrintedDocument(PrintedDocument* new_document) { if (document_.get() == new_document) return; document_ = new_document; if (document_.get()) { settings_ = document_->settings(); } if (worker_.get() && worker_->message_loop()) { DCHECK(!is_job_pending_); // Sync the document with the worker. worker_->message_loop()->PostTask( FROM_HERE, base::Bind(&HoldRefCallback, make_scoped_refptr(this), base::Bind(&PrintJobWorker::OnDocumentChanged, base::Unretained(worker_.get()), document_))); } } void PrintJob::OnNotifyPrintJobEvent(const JobEventDetails& event_details) { switch (event_details.type()) { case JobEventDetails::FAILED: { settings_.Clear(); // No need to cancel since the worker already canceled itself. 
Stop(); break; } case JobEventDetails::USER_INIT_DONE: case JobEventDetails::DEFAULT_INIT_DONE: case JobEventDetails::USER_INIT_CANCELED: { DCHECK_EQ(event_details.document(), document_.get()); break; } case JobEventDetails::NEW_DOC: case JobEventDetails::NEW_PAGE: case JobEventDetails::PAGE_DONE: case JobEventDetails::JOB_DONE: case JobEventDetails::ALL_PAGES_REQUESTED: { // Don't care. break; } case JobEventDetails::DOC_DONE: { // This will call Stop() and broadcast a JOB_DONE message. base::MessageLoop::current()->PostTask( FROM_HERE, base::Bind(&PrintJob::OnDocumentDone, this)); break; } default: { NOTREACHED(); break; } } } void PrintJob::OnDocumentDone() { // Be sure to live long enough. The instance could be destroyed by the // JOB_DONE broadcast. scoped_refptr<PrintJob> handle(this); // Stop the worker thread. Stop(); scoped_refptr<JobEventDetails> details( new JobEventDetails(JobEventDetails::JOB_DONE, document_.get(), NULL)); content::NotificationService::current()->Notify( chrome::NOTIFICATION_PRINT_JOB_EVENT, content::Source<PrintJob>(this), content::Details<JobEventDetails>(details.get())); } void PrintJob::ControlledWorkerShutdown() { DCHECK_EQ(ui_message_loop_, base::MessageLoop::current()); // The deadlock this code works around is specific to window messaging on // Windows, so we aren't likely to need it on any other platforms. #if defined(OS_WIN) // We could easily get into a deadlock case if worker_->Stop() is used; the // printer driver created a window as a child of the browser window. By // canceling the job, the printer driver initiated dialog box is destroyed, // which sends a blocking message to its parent window. If the browser window // thread is not processing messages, a deadlock occurs. // // This function ensures that the dialog box will be destroyed in a timely // manner by the mere fact that the thread will terminate. So the potential // deadlock is eliminated. 
worker_->StopSoon(); // Run a tight message loop until the worker terminates. It may seems like a // hack but I see no other way to get it to work flawlessly. The issues here // are: // - We don't want to run tasks while the thread is quitting. // - We want this code path to wait on the thread to quit before continuing. MSG msg; HANDLE thread_handle = worker_->thread_handle().platform_handle(); for (; thread_handle;) { // Note that we don't do any kind of message prioritization since we don't // execute any pending task or timer. DWORD result = MsgWaitForMultipleObjects(1, &thread_handle, FALSE, INFINITE, QS_ALLINPUT); if (result == WAIT_OBJECT_0 + 1) { while (PeekMessage(&msg, NULL, 0, 0, TRUE) > 0) { TranslateMessage(&msg); DispatchMessage(&msg); } // Continue looping. } else if (result == WAIT_OBJECT_0) { // The thread quit. break; } else { // An error occurred. Assume the thread quit. NOTREACHED(); break; } } #endif // Now make sure the thread object is cleaned up. Do this on a worker // thread because it may block. is_stopping_ = true; base::WorkerPool::PostTaskAndReply( FROM_HERE, base::Bind(&PrintJobWorker::Stop, base::Unretained(worker_.get())), base::Bind(&PrintJob::HoldUntilStopIsCalled, weak_ptr_factory_.GetWeakPtr(), scoped_refptr<PrintJob>(this)), false); } void PrintJob::HoldUntilStopIsCalled(const scoped_refptr<PrintJob>&) { is_stopped_ = true; is_stopping_ = false; } void PrintJob::Quit() { base::MessageLoop::current()->Quit(); } // Takes settings_ ownership and will be deleted in the receiving thread. JobEventDetails::JobEventDetails(Type type, PrintedDocument* document, PrintedPage* page) : document_(document), page_(page), type_(type) { } JobEventDetails::~JobEventDetails() { } PrintedDocument* JobEventDetails::document() const { return document_.get(); } PrintedPage* JobEventDetails::page() const { return page_.get(); } } // namespace printing
#include <control/ui_label.h>
#include <control/ui_button.h>
#include <control/ui_deck.h>
#include <control/ui_radio.h>
#include <control/ui_radiogroup.h>
#include <control/ui_viewport.h>

/// <summary>
/// Initializes the viewport stack.
/// </summary>
/// <param name="viewport">The viewport.</param>
/// <returns></returns>
void InitViewport_Stack(LongUI::UIViewport& viewport) noexcept {
    using namespace LongUI;
    auto& win = viewport.RefWindow();
    // Locate the radio group and the deck whose pages it drives.
    const auto radio_group = longui_cast<UIRadioGroup*>(win.FindControl("radiogroup"));
    const auto card_deck = longui_cast<UIDeck*>(win.FindControl("my-deck"));
    // Wire each radio button so that selecting it shows the deck page
    // with the same ordinal position as the radio within its group.
    uint32_t page_index = 0;
    for (auto& entry : (*radio_group)) {
        auto& radio_btn = *longui_cast<UIRadio*>(&entry);
        // Capture by value: each lambda keeps its own copy of page_index
        // (and of the deck pointer) for use after this loop has finished.
        radio_btn.AddGuiEventListener(
            radio_btn._onCommand(), [=](const LongUI::GuiEventArg&) noexcept {
            card_deck->SetSelectedIndex(page_index);
            return Event_Accept;
        });
        ++page_index;
    }
}
inline PDFDraw::PDFDraw(double dpi) { REX(TRN_PDFDrawCreate(dpi,&mp_draw)); } inline PDFDraw::~PDFDraw() { DREX(mp_draw, TRN_PDFDrawDestroy(mp_draw)); } inline void PDFDraw::Destroy() { REX(TRN_PDFDrawDestroy(mp_draw)); mp_draw=0; } inline void PDFDraw::SetRasterizerType (PDFRasterizer::Type type) { REX(TRN_PDFDrawSetRasterizerType(mp_draw,(enum TRN_PDFRasterizerType)type)); } inline void PDFDraw::SetDPI(double dpi) { REX(TRN_PDFDrawSetDPI(mp_draw,dpi)); } inline void PDFDraw::SetImageSize(int width, int height, bool preserve_aspect_ratio) { REX(TRN_PDFDrawSetImageSize(mp_draw,width,height,BToTB(preserve_aspect_ratio))); } inline void PDFDraw::SetPageBox(Page::Box region) { REX(TRN_PDFDrawSetPageBox(mp_draw,(enum TRN_PageBox)region)); } inline void PDFDraw::SetClipRect(Rect rect) { REX(TRN_PDFDrawSetClipRect(mp_draw, (const TRN_Rect*)&rect )); } inline void PDFDraw::SetFlipYAxis(bool flip_y) { REX(TRN_PDFDrawSetFlipYAxis(mp_draw,BToTB(flip_y))); } inline void PDFDraw::SetRotate(Page::Rotate r) { REX(TRN_PDFDrawSetRotate(mp_draw,(enum TRN_PageRotate)r)); } inline void PDFDraw::SetDrawAnnotations(bool render_annots) { REX(TRN_PDFDrawSetDrawAnnotations(mp_draw,BToTB(render_annots))); } inline void PDFDraw::SetHighlightFields(bool highlight) { REX(TRN_PDFDrawSetHighlightFields(mp_draw,BToTB(highlight))); } inline void PDFDraw::SetGamma(double exp) { REX(TRN_PDFDrawSetGamma(mp_draw,exp)); } inline void PDFDraw::SetOCGContext(OCG::Context* ctx) { REX(TRN_PDFDrawSetOCGContext(mp_draw, ctx->mp_obj)); } inline void PDFDraw::SetPrintMode(bool is_printing) { REX(TRN_PDFDrawSetPrintMode(mp_draw,BToTB(is_printing))); } inline void PDFDraw::SetDefaultPageColor(UInt8 r, UInt8 g, UInt8 b) { REX(TRN_PDFDrawSetDefaultPageColor(mp_draw, r, g, b)); } inline void PDFDraw::SetPageTransparent(bool is_transparent) { REX(TRN_PDFDrawSetPageTransparent(mp_draw, BToTB(is_transparent))); } inline void PDFDraw::SetOverprint(PDFRasterizer::OverprintPreviewMode op) { 
REX(TRN_PDFDrawSetOverprint(mp_draw, (enum TRN_PDFRasterizerOverprintPreviewMode)op)); } inline void PDFDraw::SetAntiAliasing(bool enable_aa) { REX(TRN_PDFDrawSetAntiAliasing(mp_draw,BToTB(enable_aa))); } inline void PDFDraw::SetPathHinting(bool enable_hinting) { REX(TRN_PDFDrawSetPathHinting(mp_draw, BToTB(enable_hinting))); } inline void PDFDraw::SetThinLineAdjustment(bool pixel_grid_fit, bool stroke_adjust) { REX(TRN_PDFDrawSetThinLineAdjustment(mp_draw,BToTB(pixel_grid_fit), BToTB(stroke_adjust))); } inline void PDFDraw::SetImageSmoothing(bool smoothing_enabled, bool hq_image_resampling) { REX(TRN_PDFDrawSetImageSmoothing(mp_draw,BToTB(smoothing_enabled), BToTB(hq_image_resampling))); } inline void PDFDraw::SetCaching(bool enabled) { REX(TRN_PDFDrawSetCaching(mp_draw,BToTB(enabled))); } inline void PDFDraw::SetColorPostProcessMode(PDFRasterizer::ColorPostProcessMode mode) { REX(TRN_PDFDrawSetColorPostProcessMode(mp_draw, (enum TRN_PDFRasterizerColorPostProcessMode)mode)); } inline void PDFDraw::Export(Page page, const UString& filename, const char* format, SDF::Obj encoder_params) { REX(TRN_PDFDrawExport(mp_draw,page.mp_page,filename.mp_impl,format,encoder_params.mp_obj)); } inline void PDFDraw::Export(Page page, Filters::Filter& stream, const char* format, SDF::Obj encoder_params) { REX(TRN_PDFDrawExportStream(mp_draw,page.mp_page,stream.m_impl,format,encoder_params.mp_obj)); } #ifdef __DOTNET inline System::Drawing::Bitmap* PDFDraw::GetBitmap(Page page) { TRN_SystemDrawingBitmap result; REX(TRN_PDFDrawGetBitmapDotNet(mp_draw,page.mp_page,&result)); (System::Drawing::Bitmap*) result; } #endif #ifdef _WIN32 inline void PDFDraw::DrawInRect(Page& page, void* hdc, const Rect& rect) { REX(TRN_PDFDrawDrawInRect(mp_draw,page.mp_page,hdc,(const TRN_Rect*)&rect)); } #endif inline const BitmapInfo PDFDraw::GetBitmap(Page page, PixelFormat pix_fmt, bool demult) { const TRN_UChar* result; int width; int height; int stride; double dpi; 
REX(TRN_PDFDrawGetBitmap(mp_draw,page.mp_page,&width,&height,&stride,&dpi, (enum TRN_PDFDrawPixelFormat)pix_fmt, demult, &result)); BitmapInfo bmp(width,height,stride,dpi,(char*)result); return bmp; } #ifndef SWIG inline const UChar* PDFDraw::GetBitmap(Page page, int& out_width, int& out_height, int& out_stride, double& out_dpi, PixelFormat pix_fmt, bool demult) { const TRN_UChar* result; REX(TRN_PDFDrawGetBitmap(mp_draw,page.mp_page,&out_width,&out_height,&out_stride,&out_dpi, (enum TRN_PDFDrawPixelFormat)pix_fmt, demult, &result)); return result; } #endif #ifdef SWIG inline void PDFDraw::SetErrorReportProc(Callback* instance) { REX(TRN_PDFDrawSetErrorReportProc(mp_draw,(TRN_RasterizerErrorReportProc)&(Callback::StaticErrorReportProc),instance)); } #else inline void PDFDraw::SetErrorReportProc(PDFRasterizer::ErrorReportProc error_proc, void* data) { REX(TRN_PDFDrawSetErrorReportProc(mp_draw,(TRN_RasterizerErrorReportProc)error_proc,data)); } #endif
#include <cassert> #include <cstdio> #include "../kakuro/KakuroField.h" #include "../kakuro/KakuroProblem.h" #include "Test.h" namespace Penciloid { void PenciloidTest::KakuroTest1() { KakuroProblem prob; prob.Init(3, 3); prob.SetNumberCell(1, 1); prob.SetNumberCell(1, 2); prob.SetNumberCell(2, 1); prob.SetNumberCell(2, 2); prob.SetClue(0, 1, 17, KakuroProblem::CLUE_NONE); prob.SetClue(0, 2, 11, KakuroProblem::CLUE_NONE); prob.SetClue(1, 0, KakuroProblem::CLUE_NONE, 16); prob.SetClue(2, 0, KakuroProblem::CLUE_NONE, 12); KakuroField field; field.Init(prob); field.CheckAll(); field.Debug(); } }
#include <stan/model/model_functional.hpp> #include <gtest/gtest.h>
// Copyright 2018 The Chromium OS Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "bsdiff/brotli_decompressor.h" #include <memory> #include <string> #include <vector> #include <gtest/gtest.h> namespace { // echo -n "Hello!" | brotli -9 | hexdump -v -e '" " 11/1 "0x%02x, " "\n"' constexpr uint8_t kBrotliHello[] = { 0x8b, 0x02, 0x80, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x21, 0x03, }; } // namespace namespace bsdiff { class BrotliDecompressorTest : public testing::Test { protected: void SetUp() { decompressor_.reset(new BrotliDecompressor()); EXPECT_NE(nullptr, decompressor_.get()); } std::unique_ptr<BrotliDecompressor> decompressor_; }; TEST_F(BrotliDecompressorTest, SmokeTest) { EXPECT_TRUE(decompressor_->SetInputData(kBrotliHello, sizeof(kBrotliHello))); std::vector<uint8_t> output_data(6); EXPECT_TRUE(decompressor_->Read(output_data.data(), output_data.size())); std::string hello = "Hello!"; EXPECT_EQ(std::vector<uint8_t>(hello.begin(), hello.end()), output_data); } TEST_F(BrotliDecompressorTest, ReadingFromEmptyFileTest) { uint8_t data = 0; EXPECT_TRUE(decompressor_->SetInputData(&data, 0)); uint8_t output_data[10]; EXPECT_FALSE(decompressor_->Read(output_data, sizeof(output_data))); } // Check that we fail to read from a truncated file. TEST_F(BrotliDecompressorTest, ReadingFromTruncatedFileTest) { // We feed only half of the compressed file. EXPECT_TRUE( decompressor_->SetInputData(kBrotliHello, sizeof(kBrotliHello) / 2)); uint8_t output_data[6]; EXPECT_FALSE(decompressor_->Read(output_data, sizeof(output_data))); } // Check that we fail to read more than it is available in the file. TEST_F(BrotliDecompressorTest, ReadingMoreThanAvailableTest) { EXPECT_TRUE(decompressor_->SetInputData(kBrotliHello, sizeof(kBrotliHello))); uint8_t output_data[1000]; EXPECT_FALSE(decompressor_->Read(output_data, sizeof(output_data))); } } // namespace bsdiff
/* TEMPLATE GENERATED TESTCASE FILE Filename: CWE762_Mismatched_Memory_Management_Routines__delete_array_class_malloc_11.cpp Label Definition File: CWE762_Mismatched_Memory_Management_Routines__delete_array.label.xml Template File: sources-sinks-11.tmpl.cpp */ /* * @description * CWE: 762 Mismatched Memory Management Routines * BadSource: malloc Allocate data using malloc() * GoodSource: Allocate data using new [] * Sinks: * GoodSink: Deallocate data using free() * BadSink : Deallocate data using delete [] * Flow Variant: 11 Control flow: if(globalReturnsTrue()) and if(globalReturnsFalse()) * */ #include "std_testcase.h" namespace CWE762_Mismatched_Memory_Management_Routines__delete_array_class_malloc_11 { #ifndef OMITBAD void bad() { TwoIntsClass * data; /* Initialize data*/ data = NULL; if(globalReturnsTrue()) { /* POTENTIAL FLAW: Allocate memory with a function that requires free() to free the memory */ data = (TwoIntsClass *)malloc(100*sizeof(TwoIntsClass)); if (data == NULL) {exit(-1);} } if(globalReturnsTrue()) { /* POTENTIAL FLAW: Deallocate memory using delete [] - the source memory allocation function may * require a call to free() to deallocate the memory */ delete [] data; } } #endif /* OMITBAD */ #ifndef OMITGOOD /* goodB2G1() - use badsource and goodsink by changing the second globalReturnsTrue() to globalReturnsFalse() */ static void goodB2G1() { TwoIntsClass * data; /* Initialize data*/ data = NULL; if(globalReturnsTrue()) { /* POTENTIAL FLAW: Allocate memory with a function that requires free() to free the memory */ data = (TwoIntsClass *)malloc(100*sizeof(TwoIntsClass)); if (data == NULL) {exit(-1);} } if(globalReturnsFalse()) { /* INCIDENTAL: CWE 561 Dead Code, the code below will never run */ printLine("Benign, fixed string"); } else { /* FIX: Free memory using free() */ free(data); } } /* goodB2G2() - use badsource and goodsink by reversing the blocks in the second if */ static void goodB2G2() { TwoIntsClass * data; /* Initialize data*/ data = 
NULL; if(globalReturnsTrue()) { /* POTENTIAL FLAW: Allocate memory with a function that requires free() to free the memory */ data = (TwoIntsClass *)malloc(100*sizeof(TwoIntsClass)); if (data == NULL) {exit(-1);} } if(globalReturnsTrue()) { /* FIX: Free memory using free() */ free(data); } } /* goodG2B1() - use goodsource and badsink by changing the first globalReturnsTrue() to globalReturnsFalse() */ static void goodG2B1() { TwoIntsClass * data; /* Initialize data*/ data = NULL; if(globalReturnsFalse()) { /* INCIDENTAL: CWE 561 Dead Code, the code below will never run */ printLine("Benign, fixed string"); } else { /* FIX: Allocate memory using new [] */ data = new TwoIntsClass[100]; } if(globalReturnsTrue()) { /* POTENTIAL FLAW: Deallocate memory using delete [] - the source memory allocation function may * require a call to free() to deallocate the memory */ delete [] data; } } /* goodG2B2() - use goodsource and badsink by reversing the blocks in the first if */ static void goodG2B2() { TwoIntsClass * data; /* Initialize data*/ data = NULL; if(globalReturnsTrue()) { /* FIX: Allocate memory using new [] */ data = new TwoIntsClass[100]; } if(globalReturnsTrue()) { /* POTENTIAL FLAW: Deallocate memory using delete [] - the source memory allocation function may * require a call to free() to deallocate the memory */ delete [] data; } } void good() { goodB2G1(); goodB2G2(); goodG2B1(); goodG2B2(); } #endif /* OMITGOOD */ } /* close namespace */ /* Below is the main(). It is only used when building this testcase on its own for testing or for building a binary to use in testing binary analysis tools. It is not used when compiling all the testcases as one application, which is how source code analysis tools are tested. 
*/

#ifdef INCLUDEMAIN

/* so that we can use good and bad easily */
using namespace CWE762_Mismatched_Memory_Management_Routines__delete_array_class_malloc_11;

/* Standalone driver for this generated CWE-762 testcase: runs the
 * "good" (fixed) variants and the "bad" (intentionally flawed) variant
 * so binary-analysis tools have both code paths to examine.  Either
 * side can be compiled out with OMITGOOD / OMITBAD. */
int main(int argc, char * argv[])
{
    /* seed randomness */
    srand( (unsigned)time(NULL) );
#ifndef OMITGOOD
    printLine("Calling good()...");
    good();
    printLine("Finished good()");
#endif /* OMITGOOD */
#ifndef OMITBAD
    printLine("Calling bad()...");
    bad();
    printLine("Finished bad()");
#endif /* OMITBAD */
    return 0;
}

#endif
#include "../../include/transitions/hsp_employee_transitions.h"

/*****************************************************
 * class: HspEmployeeTransitions
 *
 * Functionality for computing of transitioning
 * between different agents states for agent that is
 * a hospital employee
 *
 ******************************************************/

// Implement transitions relevant to susceptible
// Returns 1 if the agent became infected this step, 0 otherwise.
int HspEmployeeTransitions::susceptible_transitions(Agent& agent, const double time, Infection& infection,
				std::vector<Household>& households, std::vector<School>& schools,
				std::vector<Hospital>& hospitals, std::vector<Transit>& carpools,
				std::vector<Transit>& public_transit, std::vector<Leisure>& leisure_locations,
				const std::map<std::string, double>& infection_parameters, std::vector<Agent>& agents,
				const Testing& testing)
{
	double lambda_tot = 0.0;
	int got_infected = 0;
	// Total infection rate accumulated over all places this agent visits
	lambda_tot = compute_susceptible_lambda(agent, time, households, schools, hospitals,
					carpools, public_transit, leisure_locations);
	if (infection.infected(lambda_tot) == true){
		got_infected = 1;
		agent.set_inf_variability_factor(infection.inf_variability());
		// Infectiousness, latency, and possibility of never developing
		// symptoms
		recovery_and_incubation(agent, infection, time, infection_parameters);
		// Determine if getting tested, how, and when
		// Remove agent from places if under home isolation
		if (testing.started(time)){
			set_testing_status(agent, infection, time, schools, hospitals, infection_parameters, testing);
		}
	}
	return got_infected;
}

// Return total lambda of susceptible agent
// Sums infected contributions from household, hospital (instead of a regular
// workplace), optional school, transit mode, and optional leisure location.
// Throws std::invalid_argument for an unrecognized leisure type.
double HspEmployeeTransitions::compute_susceptible_lambda(const Agent& agent, const double time,
					const std::vector<Household>& households, const std::vector<School>& schools,
					const std::vector<Hospital>& hospitals, const std::vector<Transit>& carpools,
					const std::vector<Transit>& public_transit, const std::vector<Leisure>& leisure_locations)
{
	double lambda_tot = 0.0;
	// Count hospital instead of workplace
	// Can be a student at the same time
	// NOTE: IDs are 1-based; .at(ID-1) converts to 0-based vector index.
	const Household& house = households.at(agent.get_household_ID()-1);
	const Hospital& hospital = hospitals.at(agent.get_hospital_ID()-1);
	if (agent.student()){
		const School& school = schools.at(agent.get_school_ID()-1);
		lambda_tot = house.get_infected_contribution()+
						hospital.get_infected_contribution()+
						school.get_infected_contribution();
	} else {
		lambda_tot = house.get_infected_contribution()+
						hospital.get_infected_contribution();
	}
	// Transit
	if (agent.get_work_travel_mode() == "carpool") {
		lambda_tot += carpools.at(agent.get_carpool_ID()-1).get_infected_contribution();
	}
	if (agent.get_work_travel_mode() == "public") {
		lambda_tot += public_transit.at(agent.get_public_transit_ID()-1).get_infected_contribution();
	}
	// Leisure (leisure ID 0 means no leisure location this step)
	if (agent.get_leisure_ID() > 0) {
		if (agent.get_leisure_type() == "public") {
			lambda_tot += leisure_locations.at(agent.get_leisure_ID()-1).get_infected_contribution();
		} else if (agent.get_leisure_type() == "household") {
			lambda_tot += households.at(agent.get_leisure_ID()-1).get_infected_contribution();
		} else {
			throw std::invalid_argument("Wrong leisure type: " + agent.get_leisure_type());
		}
	}
	return lambda_tot;
}

// Compute and set agent properties related to recovery without symptoms and incubation
void HspEmployeeTransitions::recovery_and_incubation(Agent& agent, Infection& infection, const double time,
				const std::map<std::string, double>& infection_parameters)
{
	// Determine if agent will recover without
	// becoming symptomatic and update corresponding states
	bool never_sy = infection.recovering_exposed(agent.get_age());
	// Total latency period
	double latency = infection.latency();
	// Portion of latency when the agent is not infectious
	double dt_ninf = std::min(infection_parameters.at("time from exposed to infectiousness"), latency);
	if (never_sy){
		states_manager.set_susceptible_to_exposed_never_symptomatic(agent);
		// Set to total latency + infectiousness duration
		double rec_time = infection_parameters.at("recovery time");
		agent.set_latency_duration(latency + rec_time);
		agent.set_latency_end_time(time);
		agent.set_infectiousness_start_time(time, dt_ninf);
	}else{
		// If latency shorter, then not infectious during the entire latency
		states_manager.set_susceptible_to_exposed(agent);
		agent.set_latency_duration(latency);
		agent.set_latency_end_time(time);
		agent.set_infectiousness_start_time(time, dt_ninf);
	}
}

// Implement transitions relevant to exposed
// Returns a 5-element vector of flags:
//   [0] recovered without symptoms, [2] got tested, [3] tested positive,
//   [4] false-negative result. ([1] is unused here.)
std::vector<int> HspEmployeeTransitions::exposed_transitions(Agent& agent, Infection& infection, const double time,
				const double dt, std::vector<Household>& households, std::vector<School>& schools,
				std::vector<Hospital>& hospitals, std::vector<Transit>& carpools, std::vector<Transit>& public_transit,
				const std::map<std::string, double>& infection_parameters, const Testing& testing)
{
	std::vector<int> state_changes(5,0);
	// Modified mortality for hospital employees
	const bool is_hsp = true;
	// First check for testing because that holds for transition changes too
	// If being tested
	if ((agent.tested()) && (agent.get_time_of_test() <= time)
			&& (agent.tested_awaiting_test() == true)){
		testing_transitions(agent, time, infection_parameters);
		state_changes.at(2) = 1;
	}
	// If getting test results (this may in principle happen in a
	// single step)
	if ((agent.tested()) && (agent.get_time_of_results() <= time)
			&& (agent.tested_awaiting_results() == true)){
		testing_results_transitions(agent, time, dt, infection, households, schools,
						hospitals, carpools, public_transit, infection_parameters);
		if (agent.tested_covid_positive()){
			state_changes.at(3) = 1;
		}
		if (agent.tested_false_negative()){
			state_changes.at(4) = 1;
		}
	}
	// Check if latency time is over
	int agent_recovered = 0;
	if (agent.get_latency_end_time() <= time){
		// Recovering without symptoms - remove
		if (agent.recovering_exposed()){
			if (agent.home_isolated()){
				// Was pulled out for home isolation - re-register in public places
				add_to_hospitals_and_schools(agent, schools, hospitals, carpools, public_transit);
			}
			states_manager.set_exposed_never_symptomatic_to_removed(agent);
			agent_recovered = 1;
		} else {
			// Transition to symptomatic
			// Adjust flags - common, overwrite for specific
			states_manager.set_exposed_to_symptomatic(agent);
			// Hospital employee will go under IH and test for sure
			remove_from_hospitals_and_schools(agent, schools, hospitals, carpools, public_transit);
			// Removal settings
			int agent_age = agent.get_age();
			if (infection.will_die_non_icu(agent_age, is_hsp)){
				states_manager.set_dying_symptomatic(agent);
				agent.set_time_to_death(infection.time_to_death());
				agent.set_death_time(time);
			} else {
				states_manager.set_recovering_symptomatic(agent);
				// This may change if treatment is ICU
				agent.set_recovery_duration(infection_parameters.at("recovery time"));
				agent.set_recovery_time(time);
			}
			// Determine testing time and set home isolation - if not yet confirmed and IH
			if (agent.tested_covid_positive() == false){
				// If not waiting for test or results
				// otherwise don't change
				if (agent.tested() == false && (testing.started(time))){
					set_testing_status(agent, infection, time, schools,
							hospitals, infection_parameters, testing);
				}
				if (agent.tested() == true){
					agent.set_home_isolated(true);
				}
			}else{
				// If already confirmed positive and IH - determine treatment
				// At this point agent is already removed from all places except for household
				// so no need to repeat
				select_initial_treatment(agent, time, dt, infection, households, schools,
						hospitals, carpools, public_transit, infection_parameters);
			}
		}
	}
	state_changes.at(0) = agent_recovered;
	return state_changes;
}

// Determine any testing related properties
void HspEmployeeTransitions::set_testing_status(Agent& agent, Infection& infection, const double time,
				std::vector<School>& schools, std::vector<Hospital>& hospitals,
				const std::map<std::string, double>& infection_parameters, const Testing& testing)
{
	// NOTE(review): n_hospitals is computed but never used in this function.
	const int n_hospitals = hospitals.size();
	bool will_be_tested = false;
	// Different probability for exposed
	if (agent.exposed()){
		will_be_tested = infection.will_be_tested(testing.get_exp_tested_prob());
		if (will_be_tested == true){
			agent.set_tested_exposed(true);
			// Assuming hospital employees test in their respective hospitals
			// Also - no home isolation until symptoms
			states_manager.set_exposed_waiting_for_test_in_hospital(agent);
			// Time to test
			agent.set_time_to_test(infection_parameters.at("time from decision to test"));
			agent.set_time_of_test(time);
		}
	} else if (agent.symptomatic()) {
		// Again - testing in the workplace - hospital
		// with home isolation set elsewhere
		states_manager.set_waiting_for_test_in_hospital(agent);
		// Testing-related events - will be adjusted based on other time-dependent scenarios
		agent.set_time_to_test(infection_parameters.at("time from decision to test"));
		agent.set_time_of_test(time);
	}
}

// Transitions of a symptomatic agent
// Returns flags: [0] recovered, [1] died (2 if died untested/false-negative),
// [2] got tested, [3] tested positive. Each testing/removal event short-circuits
// with an early return so at most one event class fires per step.
std::vector<int> HspEmployeeTransitions::symptomatic_transitions(Agent& agent, const double time, const double dt,
				Infection& infection, std::vector<Household>& households, std::vector<School>& schools,
				std::vector<Hospital>& hospitals, std::vector<Transit>& carpools, std::vector<Transit>& public_transit,
				const std::map<std::string, double>& infection_parameters)
{
	std::vector<int> state_changes(5,0);
	int tested_pos = 0;
	// First entry is one if agent recovered, second if agent died
	std::vector<int> removed = {0,0};
	removed = check_agent_removal(agent, time, households, schools, hospitals, carpools, public_transit);
	state_changes.at(0) = removed.at(0);
	state_changes.at(1) = removed.at(1);
	if (agent.removed() == true){
		if (state_changes.at(1) == 1){
			// Correct for not tested or false negative and not treated
			// The not treated is equal to not confirmed positive
			if (agent.tested_covid_positive() == false){
				state_changes.at(1) = 2;
			}
		}
		return state_changes;
	}
	// If being tested
	if ((agent.tested()) && (agent.get_time_of_test() <= time)
			&& (agent.tested_awaiting_test() == true)){
		testing_transitions(agent, time, infection_parameters);
		state_changes.at(2) = 1;
		return state_changes;
	}
	// If getting test results (this may in principle happen in a
	// single step)
	if ((agent.tested()) && (agent.get_time_of_results() <= time)
			&& (agent.tested_awaiting_results() == true)){
		tested_pos = testing_results_transitions(agent, time, dt, infection, households,
						schools, hospitals, carpools, public_transit, infection_parameters);
		state_changes.at(3) = tested_pos;
		return state_changes;
	}
	// Treatment transitions (also possible in a single step)
	if (agent.being_treated()){
		treatment_transitions(agent, time, dt, infection, households, hospitals, infection_parameters);
	}
	return state_changes;
}

// Verify if agent is to be removed at this step
// Returns {recovered, died}; on removal the agent is taken out of (death) or
// put back into (recovery, unless false-negative) all registered places.
std::vector<int> HspEmployeeTransitions::check_agent_removal(Agent& agent, const double time,
				std::vector<Household>& households, std::vector<School>& schools,
				std::vector<Hospital>& hospitals, std::vector<Transit>& carpools, std::vector<Transit>& public_transit)
{
	// First entry is one if agent recovered, second if agent died
	std::vector<int> removed = {0,0};
	// If dying
	if (agent.dying() == true){
		if (agent.get_time_of_death() <= time){
			removed.at(1) = 1;
			remove_agent_from_all_places(agent, households, schools, hospitals, carpools, public_transit);
			states_manager.set_any_to_removed(agent);
		}
	}
	// If recovering
	if (agent.recovering() == true){
		if (agent.get_recovery_time() <= time){
			removed.at(0) = 1;
			if (agent.tested_false_negative() == false){
				add_agent_to_all_places(agent, households, schools, hospitals, carpools, public_transit);
			}
			states_manager.set_any_to_removed(agent);
		}
	}
	return removed;
}

// Agent transitions related to testing time
void HspEmployeeTransitions::testing_transitions(Agent& agent, const double time,
				const std::map<std::string, double>& infection_parameters)
{
	// Determine the time agent gets results
	agent.set_time_until_results(infection_parameters.at("time from test to results"));
	agent.set_time_of_results(time);
	states_manager.set_tested_to_awaiting_results(agent);
}

// Agent transitions upon receiving test results
// Returns 1 if confirmed positive, 0 for a false negative.
int HspEmployeeTransitions::testing_results_transitions(Agent& agent,
			const double time, const double dt, Infection& infection,
			std::vector<Household>& households, std::vector<School>& schools,
			std::vector<Hospital>& hospitals, std::vector<Transit>& carpools, std::vector<Transit>& public_transit,
			const std::map<std::string, double>& infection_parameters)
{
	// If false negative, remove testing, put back to exposed
	// No false negative symptomatic
	double fneg_prob = infection_parameters.at("fraction false negative");
	int tested_pos = 0;
	if (infection.false_negative_test_result(fneg_prob) == true
					&& agent.exposed() == true){
		states_manager.set_tested_false_negative(agent);
	} else {
		// If confirmed positive
		tested_pos = 1;
		// Exposed - keep in home isolation until symptomatic
		if (agent.exposed()){
			states_manager.set_home_isolation(agent);
			remove_from_hospitals_and_schools(agent, schools, hospitals, carpools, public_transit);
			agent.set_tested_covid_positive(true);
		} else {
			// Symptomatic - identify treatment
			select_initial_treatment(agent, time, dt, infection, households, schools,
					hospitals, carpools, public_transit, infection_parameters);
			agent.set_tested_covid_positive(true);
		}
	}
	return tested_pos;
}

// Determine type of initial treatment and its properties
void HspEmployeeTransitions::select_initial_treatment(Agent& agent,
			const double time, const double dt, Infection& infection,
			std::vector<Household>& households, std::vector<School>& schools,
			std::vector<Hospital>& hospitals, std::vector<Transit>& carpools, std::vector<Transit>& public_transit,
			const std::map<std::string, double>& infection_parameters)
{
	if (infection.agent_hospitalized(agent.get_age()) == true){
		// Remove agent from all places, then add to a random
		// hospital for treatment
		remove_agent_from_all_places(agent, households, schools, hospitals, carpools, public_transit);
		// But then add to a random hospital
		int hID = infection.get_random_hospital_ID(hospitals.size());
		agent.set_hospital_ID(hID);
		hospitals.at(hID-1).add_agent(agent.get_ID());
		// ICU
		if (infection.agent_hospitalized_ICU(agent.get_age()) == true){
			// Retest for dying
			if (infection.will_die_ICU()){
				states_manager.set_icu_dying(agent);
				agent.set_time_to_death(infection.time_to_death());
				agent.set_death_time(time);
			}else{
				// If recovering - set times and transitions
				states_manager.set_icu_recovering(agent);
				// Reset the recovery time to > ICU + hospitalization
				double t_icu = infection_parameters.at("time in ICU");
				double t_hsp_icu = infection_parameters.at("time in hospital after ICU");
				agent.set_time_icu_to_hsp(time + t_icu);
				agent.set_time_hsp_to_ih(time + t_icu + t_hsp_icu);
				agent.set_recovery_duration(t_icu + t_hsp_icu);
				agent.set_recovery_time(time);
			}
		}else{
			// Hospitalized
			states_manager.set_hospitalized(agent);
			// If dying, set transition to ICU
			if (agent.dying() == true){
				double dt_icu = infection_parameters.at("time before death to ICU");
				double t_icu = std::max(agent.get_time_of_death() - dt_icu, time + dt_icu);
				agent.set_time_hsp_to_icu(t_icu);
			}else{
				// If recovering, set transition to home
				double t_rh = agent.get_recovery_time();
				double del_t_hsp = infection_parameters.at("time in hospital");
				double t_hsp = time + del_t_hsp;
				// NOTE(review): both branches below call set_time_hsp_to_ih(t_hsp);
				// they differ only in whether recovery is reset — confirm the
				// t_rh > t_hsp branch is intentionally a no-op for recovery times.
				if (t_rh > t_hsp){
					agent.set_time_hsp_to_ih(t_hsp);
				}else{
					agent.set_time_hsp_to_ih(t_hsp);
					agent.set_recovery_duration(del_t_hsp);
					agent.set_recovery_time(time);
				}
			}
		}
	}else{
		// If home isolated
		states_manager.set_home_isolation(agent);
		// If dying, set transition to ICU
		if (agent.dying() == true){
			double dt_icu = infection_parameters.at("time before death to ICU");
			double t_icu = std::max(agent.get_time_of_death() - dt_icu, time + dt_icu);
			agent.set_time_ih_to_icu(t_icu);
		}else{
			// If recovering, determine possible transition to hospital
			double t_rh = agent.get_recovery_time();
			double t_hsp = time + infection.get_onset_to_hospitalization();
			// Only if transition is later than a step away and earlier than
			// the recovery time
			if ((t_rh > t_hsp) && (t_hsp > time + dt)){
				agent.set_time_ih_to_hsp(t_hsp);
			}else{
				// Set to past recovery
				agent.set_time_ih_to_hsp(2.0*t_rh);
			}
		}
	}
}

// Determine treatment changes
void HspEmployeeTransitions::treatment_transitions(Agent& agent, const double time, const double dt,
			Infection& infection, std::vector<Household>& households, std::vector<Hospital>& hospitals,
			const std::map<std::string, double>& infection_parameters)
{
	// ICU - can only transition to hospitalization
	// if not dying
	if (agent.recovering() && agent.hospitalized_ICU()){
		if (agent.get_time_icu_to_hsp() <= time){
			agent.set_hospitalized_ICU(false);
			agent.set_hospitalized(true);
		}
	}else if (agent.hospitalized()){
		if (agent.dying()){
			if (!agent.hospitalized_ICU() && agent.get_time_hsp_to_icu() <= time){
				// Transition to ICU
				agent.set_hospitalized(false);
				agent.set_hospitalized_ICU(true);
			}
		}else{
			if (agent.get_time_hsp_to_ih() <= time){
				// Transition to home isolation
				agent.set_hospitalized(false);
				agent.set_home_isolated(true);
				// Remove from hospital and add to household
				int agent_ID = agent.get_ID();
				households.at(agent.get_household_ID()-1).add_agent(agent_ID);
				// Remove agent from hospital
				hospitals.at(agent.get_hospital_ID()-1).remove_agent(agent_ID);
			}
		}
	}else if (agent.home_isolated()){
		if (agent.dying()){
			// Will end up in ICU
			if (agent.get_time_ih_to_icu() <= time){
				// Set hospital ID, add to hospital
				int hID = infection.get_random_hospital_ID(hospitals.size());
				agent.set_hospital_ID(hID);
				hospitals.at(hID-1).add_agent(agent.get_ID());
				// Remove from home
				households.at(agent.get_household_ID()-1).remove_agent(agent.get_ID());
				agent.set_home_isolated(false);
				agent.set_hospitalized(false);
				agent.set_hospitalized_ICU(true);
			}
		} else {
			double t_hsp = agent.get_time_ih_to_hsp();
			// Comparing to dt too, it is currently minimum
			// for hospitalization; for now assuming
			// no ICU for recovering - hospitalized
			if ( (t_hsp >= dt) && (t_hsp <= time) ){
				agent.set_home_isolated(false);
				agent.set_hospitalized(true);
				// Set hospital ID
				int hID = infection.get_random_hospital_ID(hospitals.size());
				agent.set_hospital_ID(hID);
				hospitals.at(hID-1).add_agent(agent.get_ID());
				// Remove from home
				households.at(agent.get_household_ID()-1).remove_agent(agent.get_ID());
				// Set transition back
				double t_rh = agent.get_recovery_time();
				double del_t_hsp = infection_parameters.at("time in hospital");
				// NOTE(review): this inner t_hsp shadows the outer one above —
				// intentional-looking but worth confirming.
				double t_hsp = time + del_t_hsp;
				if (t_rh > t_hsp){
					agent.set_time_hsp_to_ih(t_hsp);
				}else{
					agent.set_time_hsp_to_ih(t_hsp);
					agent.set_recovery_duration(del_t_hsp);
					agent.set_recovery_time(time);
				}
			}
		}
	}
}

// Remove agent's ID from places where they are registered
// Throws std::runtime_error if the agent has no valid (non-zero) household ID.
void HspEmployeeTransitions::remove_agent_from_all_places(const Agent& agent,
					std::vector<Household>& households, std::vector<School>& schools,
					std::vector<Hospital>& hospitals, std::vector<Transit>& carpools, std::vector<Transit>& public_transit)
{
	// If agent is already removed from a place there is no error
	// but omitting a check is more efficient
	int agent_ID = agent.get_ID();
	int hs_ID = agent.get_household_ID();
	if (hs_ID != 0){
		households.at(hs_ID-1).remove_agent(agent_ID);
	} else {
		throw std::runtime_error("Symptomatic agent does not have a valid household ID");
	}
	if (agent.student()) {
		schools.at(agent.get_school_ID()-1).remove_agent(agent_ID);
	}
	hospitals.at(agent.get_hospital_ID()-1).remove_agent(agent_ID);
	if (agent.get_work_travel_mode() == "carpool") {
		carpools.at(agent.get_carpool_ID()-1).remove_agent(agent_ID);
	}
	if (agent.get_work_travel_mode() == "public") {
		public_transit.at(agent.get_public_transit_ID()-1).remove_agent(agent_ID);
	}
}

// Add agent's ID back to the places where they are registered
// This is done to keep realistic numbers of people in different places
// which influences the probability
void HspEmployeeTransitions::add_agent_to_all_places(const Agent& agent,
					std::vector<Household>& households, std::vector<School>& schools,
					std::vector<Hospital>& hospitals, std::vector<Transit>& carpools, std::vector<Transit>& public_transit)
{
	int agent_ID = agent.get_ID();
	// ICU should always transition to hospital first
	if (agent.hospitalized_ICU()){
		throw std::runtime_error("Attempting recovery of an agent directly from ICU");
	}
	if (agent.student()){
		schools.at(agent.get_school_ID()-1).add_agent(agent_ID);
	}
	// Hospitalized agents return home: re-add to household, drop the
	// treatment-hospital registration (the work-hospital add follows below)
	if (agent.hospitalized()){
		households.at(agent.get_household_ID()-1).add_agent(agent_ID);
		hospitals.at(agent.get_hospital_ID()-1).remove_agent(agent_ID);
	}
	hospitals.at(agent.get_hospital_ID()-1).add_agent(agent_ID);
	if (agent.get_work_travel_mode() == "carpool") {
		carpools.at(agent.get_carpool_ID()-1).add_agent(agent_ID);
	}
	if (agent.get_work_travel_mode() == "public") {
		public_transit.at(agent.get_public_transit_ID()-1).add_agent(agent_ID);
	}
}

// Remove agent from hospitals and schools for home isolation
void HspEmployeeTransitions::remove_from_hospitals_and_schools(const Agent& agent,
					std::vector<School>& schools, std::vector<Hospital>& hospitals,
					std::vector<Transit>& carpools, std::vector<Transit>& public_transit)
{
	int agent_ID = agent.get_ID();
	hospitals.at(agent.get_hospital_ID()-1).remove_agent(agent_ID);
	if (agent.student()){
		schools.at(agent.get_school_ID()-1).remove_agent(agent_ID);
	}
	if (agent.get_work_travel_mode() == "carpool") {
		carpools.at(agent.get_carpool_ID()-1).remove_agent(agent_ID);
	}
	if (agent.get_work_travel_mode() == "public") {
		public_transit.at(agent.get_public_transit_ID()-1).remove_agent(agent_ID);
	}
}

// Add agent to hospitals and schools from home isolation
void HspEmployeeTransitions::add_to_hospitals_and_schools(const Agent& agent,
					std::vector<School>& schools, std::vector<Hospital>& hospitals,
					std::vector<Transit>& carpools, std::vector<Transit>& public_transit)
{
	int agent_ID = agent.get_ID();
	if (agent.student()) {
		schools.at(agent.get_school_ID()-1).add_agent(agent_ID);
	}
	hospitals.at(agent.get_hospital_ID()-1).add_agent(agent_ID);
	if (agent.get_work_travel_mode() == "carpool") {
		carpools.at(agent.get_carpool_ID()-1).add_agent(agent_ID);
	}
	if (agent.get_work_travel_mode() == "public") {
		public_transit.at(agent.get_public_transit_ID()-1).add_agent(agent_ID);
	}
}
/**
 * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "minddata/dataset/engine/datasetops/source/qmnist_op.h"

#include <algorithm>
#include <fstream>
#include <iomanip>
#include <set>
#include <utility>

#include "debug/common.h"
#include "minddata/dataset/core/config_manager.h"
#include "minddata/dataset/core/tensor_shape.h"
#include "minddata/dataset/engine/datasetops/source/sampler/sequential_sampler.h"
#include "minddata/dataset/engine/execution_tree.h"
#include "utils/file_utils.h"
#include "utils/ms_utils.h"

namespace mindspore {
namespace dataset {
// Expected magic number of a QMNIST *-labels-idx2-int file.
const int32_t kQMnistLabelFileMagicNumber = 3074;
const int32_t kQMnistImageRows = 28;
const int32_t kQMnistImageCols = 28;
// Each QMNIST label record holds 8 uint32 fields (class, NIST series, writer id, ...).
const int32_t kQMnistLabelLength = 8;

QMnistOp::QMnistOp(const std::string &folder_path, const std::string &usage, bool compat,
                   std::unique_ptr<DataSchema> data_schema, std::shared_ptr<SamplerRT> sampler, int32_t num_workers,
                   int32_t queue_size)
    : MnistOp(usage, num_workers, folder_path, queue_size, std::move(data_schema), std::move(sampler)),
      compat_(compat) {}

// Print summary (show_all=false) or detailed (show_all=true) operator info.
void QMnistOp::Print(std::ostream &out, bool show_all) const {
  if (!show_all) {
    // Call the super class for displaying any common 1-liner info
    ParallelOp::Print(out, show_all);
    // Then show any custom derived-internal 1-liner info for this op
    out << "\n";
  } else {
    // Call the super class for displaying any common detailed info
    ParallelOp::Print(out, show_all);
    // Then show any custom derived-internal stuff
    out << "\nNumber of rows: " << num_rows_ << "\n"
        << DatasetName(true) << " directory: " << folder_path_ << "\nUsage: " << usage_
        << "\nCompat: " << (compat_ ? "yes" : "no") << "\n\n";
  }
}

// Load 1 TensorRow (image, label) using 1 MnistLabelPair or QMnistImageInfoPair.
// In compat mode the label is the scalar MNIST class; otherwise the full
// 8-element QMNIST label tensor is emitted.
Status QMnistOp::LoadTensorRow(row_id_type row_id, TensorRow *trow) {
  RETURN_UNEXPECTED_IF_NULL(trow);
  std::shared_ptr<Tensor> image, label;
  if (compat_) {
    MnistLabelPair qmnist_pair = image_label_pairs_[row_id];
    RETURN_IF_NOT_OK(Tensor::CreateFromTensor(qmnist_pair.first, &image));
    RETURN_IF_NOT_OK(Tensor::CreateScalar(qmnist_pair.second, &label));
  } else {
    QMnistImageInfoPair qmnist_pair = image_info_pairs_[row_id];
    RETURN_IF_NOT_OK(Tensor::CreateFromTensor(qmnist_pair.first, &image));
    RETURN_IF_NOT_OK(Tensor::CreateFromTensor(qmnist_pair.second, &label));
  }
  (*trow) = TensorRow(row_id, {std::move(image), std::move(label)});
  trow->setPath({image_path_[row_id], label_path_[row_id]});
  return Status::OK();
}

// Count the total number of samples for the given directory and usage without
// actually loading any data. Validates that image and label counts agree.
Status QMnistOp::CountTotalRows(const std::string &dir, const std::string &usage, int64_t *count) {
  RETURN_UNEXPECTED_IF_NULL(count);
  *count = 0;
  auto schema = std::make_unique<DataSchema>();
  RETURN_IF_NOT_OK(schema->AddColumn(ColDescriptor("image", DataType(DataType::DE_UINT8), TensorImpl::kCv, 1)));
  TensorShape scalar = TensorShape::CreateScalar();
  RETURN_IF_NOT_OK(
    schema->AddColumn(ColDescriptor("label", DataType(DataType::DE_UINT32), TensorImpl::kFlexible, 0, &scalar)));
  const int64_t num_samples = 0;
  const int64_t start_index = 0;
  auto sampler = std::make_shared<SequentialSamplerRT>(start_index, num_samples);
  std::shared_ptr<ConfigManager> cfg = GlobalContext::config_manager();
  int32_t num_workers = cfg->num_parallel_workers();
  int32_t op_connector_size = cfg->op_connector_size();
  // compat does not affect the count result, so set it to true default.
  auto op =
    std::make_shared<QMnistOp>(dir, usage, true, std::move(schema), std::move(sampler), num_workers, op_connector_size);
  // the logic of counting the number of samples
  RETURN_IF_NOT_OK(op->WalkAllFiles());
  for (size_t i = 0; i < op->image_names_.size(); ++i) {
    std::ifstream image_reader;
    image_reader.open(op->image_names_[i], std::ios::binary);
    std::ifstream label_reader;
    label_reader.open(op->label_names_[i], std::ios::binary);
    uint32_t num_images;
    RETURN_IF_NOT_OK(op->CheckImage(op->image_names_[i], &image_reader, &num_images));
    uint32_t num_labels;
    RETURN_IF_NOT_OK(op->CheckLabel(op->label_names_[i], &label_reader, &num_labels));
    CHECK_FAIL_RETURN_UNEXPECTED((num_images == num_labels),
                                 "Invalid data, num of images should be equal to num of labels loading from " + dir +
                                   ", but got num of images: " + std::to_string(num_images) +
                                   ", num of labels: " + std::to_string(num_labels) + ".");

    if (usage == "test10k") {
      // only use the first 10k samples and drop the last 50k samples
      num_images = 10000;
      num_labels = 10000;
    } else if (usage == "test50k") {
      // only use the last 50k samples and drop the first 10k samples
      num_images = 50000;
      num_labels = 50000;
    }

    *count = *count + num_images;

    // Close the readers
    image_reader.close();
    label_reader.close();
  }
  return Status::OK();
}

// Resolve the image/label file paths that correspond to usage_ and verify that
// each file exists and is a regular file.
Status QMnistOp::WalkAllFiles() {
  const std::string image_ext = "images-idx3-ubyte";
  const std::string label_ext = "labels-idx2-int";
  const std::string train_prefix = "qmnist-train";
  const std::string test_prefix = "qmnist-test";
  const std::string nist_prefix = "xnist";

  auto real_folder_path = FileUtils::GetRealPath(folder_path_.data());
  CHECK_FAIL_RETURN_UNEXPECTED(real_folder_path.has_value(),
                               "Invalid QMnist folder, " + folder_path_ + " does not exist or permission denied!");
  Path root_dir(real_folder_path.value());

  if (usage_ == "train") {
    image_names_.push_back((root_dir / Path(train_prefix + "-" + image_ext)).ToString());
    label_names_.push_back((root_dir / Path(train_prefix + "-" + label_ext)).ToString());
  } else if (usage_ == "test" || usage_ == "test10k" || usage_ == "test50k") {
    // All three test usages read the same physical files; the 10k/50k split is
    // applied later when reading/counting.
    image_names_.push_back((root_dir / Path(test_prefix + "-" + image_ext)).ToString());
    label_names_.push_back((root_dir / Path(test_prefix + "-" + label_ext)).ToString());
  } else if (usage_ == "nist") {
    image_names_.push_back((root_dir / Path(nist_prefix + "-" + image_ext)).ToString());
    label_names_.push_back((root_dir / Path(nist_prefix + "-" + label_ext)).ToString());
  } else if (usage_ == "all") {
    image_names_.push_back((root_dir / Path(train_prefix + "-" + image_ext)).ToString());
    label_names_.push_back((root_dir / Path(train_prefix + "-" + label_ext)).ToString());
    image_names_.push_back((root_dir / Path(test_prefix + "-" + image_ext)).ToString());
    label_names_.push_back((root_dir / Path(test_prefix + "-" + label_ext)).ToString());
    image_names_.push_back((root_dir / Path(nist_prefix + "-" + image_ext)).ToString());
    label_names_.push_back((root_dir / Path(nist_prefix + "-" + label_ext)).ToString());
  }

  CHECK_FAIL_RETURN_UNEXPECTED(
    image_names_.size() == label_names_.size(),
    "Invalid data, num of Qmnist image files should be equal to num of Qmnist label files under directory:" +
      folder_path_ + ", but got num of image files: " + std::to_string(image_names_.size()) +
      ", num of label files: " + std::to_string(label_names_.size()) + ".");

  for (size_t i = 0; i < image_names_.size(); i++) {
    Path file_path(image_names_[i]);
    CHECK_FAIL_RETURN_UNEXPECTED(
      file_path.Exists() && !file_path.IsDirectory(),
      "Invalid file path, Qmnist data file: " + file_path.ToString() + " does not exist or is a directory.");
    MS_LOG(INFO) << DatasetName(true) << " operator found image file at " << file_path.ToString() << ".";
  }

  for (size_t i = 0; i < label_names_.size(); i++) {
    Path file_path(label_names_[i]);
    CHECK_FAIL_RETURN_UNEXPECTED(
      file_path.Exists() && !file_path.IsDirectory(),
      "Invalid file path, Qmnist data file: " + file_path.ToString() + " does not exist or is a directory.");
    MS_LOG(INFO) << DatasetName(true) << " operator found label file at " << file_path.ToString() << ".";
  }
  return Status::OK();
}

// Read one matched (image, label) file pair into memory, binarize pixels, fix
// label endianness, and append the per-sample tensors to the internal caches.
Status QMnistOp::ReadImageAndLabel(std::ifstream *image_reader, std::ifstream *label_reader, size_t index) {
  RETURN_UNEXPECTED_IF_NULL(image_reader);
  RETURN_UNEXPECTED_IF_NULL(label_reader);
  uint32_t num_images, num_labels;
  RETURN_IF_NOT_OK(CheckImage(image_names_[index], image_reader, &num_images));
  RETURN_IF_NOT_OK(CheckLabel(label_names_[index], label_reader, &num_labels));
  CHECK_FAIL_RETURN_UNEXPECTED((num_images == num_labels),
                               "Invalid data, num of images should be equal to num of labels loading from " +
                                 folder_path_ + ", but got num of images: " + std::to_string(num_images) +
                                 ", num of labels: " + std::to_string(num_labels) + ".");

  // The image size of the QMNIST dataset is fixed at [28,28]
  int64_t image_size = kQMnistImageRows * kQMnistImageCols;
  int64_t label_length = kQMnistLabelLength;
  if (usage_ == "test10k") {
    // only use the first 10k samples and drop the last 50k samples
    num_images = 10000;
    num_labels = 10000;
  } else if (usage_ == "test50k") {
    num_images = 50000;
    num_labels = 50000;
    // skip the first 10k samples for ifstream reader
    (void)image_reader->ignore(image_size * 10000);
    // labels are stored as uint32, i.e. 4 bytes each
    (void)label_reader->ignore(label_length * 10000 * 4);
  }
  auto images_buf = std::make_unique<char[]>(image_size * num_images);
  auto labels_buf = std::make_unique<uint32_t[]>(label_length * num_labels);
  if (images_buf == nullptr || labels_buf == nullptr) {
    std::string err_msg = "[Internal ERROR] Failed to allocate memory for " + DatasetName() + " buffer.";
    MS_LOG(ERROR) << err_msg.c_str();
    RETURN_STATUS_UNEXPECTED(err_msg);
  }
  (void)image_reader->read(images_buf.get(), image_size * num_images);
  if (image_reader->fail()) {
    RETURN_STATUS_UNEXPECTED("Invalid file, failed to read " + std::to_string(image_size * num_images) +
                             " bytes from " + image_names_[index] +
                             ": the data file is damaged or the content is incomplete.");
  }
  // uint32_t use 4 bytes in memory
  (void)label_reader->read(reinterpret_cast<char *>(labels_buf.get()), label_length * num_labels * 4);
  if (label_reader->fail()) {
    RETURN_STATUS_UNEXPECTED("Invalid file, failed to read " + std::to_string(label_length * num_labels * 4) +
                             " bytes from " + label_names_[index] +
                             ": the data file is damaged or content is incomplete.");
  }
  TensorShape image_tensor_shape = TensorShape({kQMnistImageRows, kQMnistImageCols, 1});
  TensorShape label_tensor_shape = TensorShape({kQMnistLabelLength});
  for (int64_t data_index = 0; data_index != num_images; data_index++) {
    auto image = &images_buf[data_index * image_size];
    // Binarize: any non-zero pixel becomes 255
    for (int64_t image_index = 0; image_index < image_size; image_index++) {
      image[image_index] = (image[image_index] == 0) ? 0 : 255;
    }
    std::shared_ptr<Tensor> image_tensor;
    RETURN_IF_NOT_OK(Tensor::CreateFromMemory(image_tensor_shape, data_schema_->Column(0).Type(),
                                              reinterpret_cast<unsigned char *>(image), &image_tensor));
    auto label = &labels_buf[data_index * label_length];
    // Labels are stored big-endian on disk
    for (int64_t label_index = 0; label_index < label_length; label_index++) {
      label[label_index] = SwapEndian(label[label_index]);
    }
    std::shared_ptr<Tensor> label_tensor;
    RETURN_IF_NOT_OK(Tensor::CreateFromMemory(label_tensor_shape, data_schema_->Column(1).Type(),
                                              reinterpret_cast<unsigned char *>(label), &label_tensor));
    image_info_pairs_.emplace_back(std::make_pair(image_tensor, label_tensor));
    // label[0] is the MNIST-compatible class id
    image_label_pairs_.emplace_back(std::make_pair(image_tensor, label[0]));
    image_path_.push_back(image_names_[index]);
    label_path_.push_back(label_names_[index]);
  }
  return Status::OK();
}

// Validate a QMNIST label file header (magic number, record length, total size)
// and return the number of label records through num_labels.
Status QMnistOp::CheckLabel(const std::string &file_name, std::ifstream *label_reader, uint32_t *num_labels) {
  RETURN_UNEXPECTED_IF_NULL(label_reader);
  RETURN_UNEXPECTED_IF_NULL(num_labels);
  CHECK_FAIL_RETURN_UNEXPECTED(label_reader->is_open(),
                               "Invalid file, failed to open " + file_name + ": the label file is permission denied.");
  int64_t label_len = label_reader->seekg(0, std::ios::end).tellg();
  (void)label_reader->seekg(0, std::ios::beg);
  // The first 12 bytes of the label file are type, number and length
  CHECK_FAIL_RETURN_UNEXPECTED(label_len >= 12, "Invalid file, load " + file_name +
                                                  " failed: the first 12 bytes of the label file should be type, " +
                                                  "number and length, but got the first read bytes : " +
                                                  std::to_string(label_len));
  uint32_t magic_number;
  RETURN_IF_NOT_OK(ReadFromReader(label_reader, &magic_number));
  // BUG FIX: the failure message previously claimed "the number of labels ...
  // should be 3074" although what is validated here is the file magic number.
  CHECK_FAIL_RETURN_UNEXPECTED(magic_number == kQMnistLabelFileMagicNumber,
                               "Invalid label file, the magic number of the label file loading from " + file_name +
                                 " should be " + std::to_string(kQMnistLabelFileMagicNumber) + ", but got " +
                                 std::to_string(magic_number) + ".");
  uint32_t num_items;
  RETURN_IF_NOT_OK(ReadFromReader(label_reader, &num_items));
  uint32_t length;
  RETURN_IF_NOT_OK(ReadFromReader(label_reader, &length));
  CHECK_FAIL_RETURN_UNEXPECTED(length == kQMnistLabelLength, "Invalid data, length of every label loading from " +
                                                               file_name + " should be equal to 8, but got " +
                                                               std::to_string(length) + ".");
  // Each label record is kQMnistLabelLength uint32 values (4 bytes each)
  CHECK_FAIL_RETURN_UNEXPECTED((label_len - 12) == num_items * kQMnistLabelLength * 4,
                               "Invalid data, the total bytes of labels loading from Qmnist label file: " + file_name +
                                 " should be " + std::to_string(label_len - 12) + ", but got " +
                                 std::to_string(num_items * kQMnistLabelLength * 4) + ".");
  *num_labels = num_items;
  return Status::OK();
}
}  // namespace dataset
}  // namespace mindspore
// Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. // // The following only applies to changes made to this file as part of YugaByte development. // // Portions Copyright (c) YugaByte, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software distributed under the License // is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express // or implied. See the License for the specific language governing permissions and limitations // under the License. // // ================================================================================================ // // The catalog manager handles the current list of tables // and tablets in the cluster, as well as their current locations. 
// Since most operations in the master go through these data // structures, locking is carefully managed here to prevent unnecessary // contention and deadlocks: // // - each structure has an internal spinlock used for operations that // are purely in-memory (eg the current status of replicas) // - data that is persisted on disk is stored in separate PersistentTable(t)Info // structs. These are managed using copy-on-write so that writers may block // writing them back to disk while not impacting concurrent readers. // // Usage rules: // - You may obtain READ locks in any order. READ locks should never block, // since they only conflict with COMMIT which is a purely in-memory operation. // Thus they are deadlock-free. // - If you need a WRITE lock on both a table and one or more of its tablets, // acquire the lock on the table first. This strict ordering prevents deadlocks. // // ================================================================================================ #include "yb/master/catalog_manager.h" #include <stdlib.h> #include <algorithm> #include <bitset> #include <functional> #include <mutex> #include <set> #include <unordered_map> #include <vector> #include <boost/optional.hpp> #include <boost/thread/shared_mutex.hpp> #include <glog/logging.h> #include <google/protobuf/text_format.h> #include "yb/common/common.pb.h" #include "yb/common/common_flags.h" #include "yb/common/partial_row.h" #include "yb/common/partition.h" #include "yb/common/roles_permissions.h" #include "yb/common/wire_protocol.h" #include "yb/consensus/consensus.h" #include "yb/consensus/consensus.proxy.h" #include "yb/consensus/consensus_peers.h" #include "yb/consensus/quorum_util.h" #include "yb/gutil/algorithm.h" #include "yb/gutil/atomicops.h" #include "yb/gutil/map-util.h" #include "yb/gutil/mathlimits.h" #include "yb/gutil/stl_util.h" #include "yb/gutil/strings/escaping.h" #include "yb/gutil/strings/join.h" #include "yb/gutil/strings/substitute.h" #include "yb/gutil/sysinfo.h" 
#include "yb/gutil/walltime.h" #include "yb/master/async_rpc_tasks.h" #include "yb/master/backfill_index.h" #include "yb/master/catalog_entity_info.h" #include "yb/master/catalog_loaders.h" #include "yb/master/catalog_manager_bg_tasks.h" #include "yb/master/catalog_manager-internal.h" #include "yb/master/catalog_manager_util.h" #include "yb/master/cluster_balance.h" #include "yb/master/encryption_manager.h" #include "yb/master/master.h" #include "yb/master/master.pb.h" #include "yb/master/master.proxy.h" #include "yb/master/master_error.h" #include "yb/master/master_fwd.h" #include "yb/master/master_util.h" #include "yb/master/permissions_manager.h" #include "yb/master/sys_catalog_constants.h" #include "yb/master/sys_catalog_initialization.h" #include "yb/master/sys_catalog.h" #include "yb/master/system_tablet.h" #include "yb/master/tasks_tracker.h" #include "yb/master/ts_descriptor.h" #include "yb/master/ts_manager.h" #include "yb/master/yql_aggregates_vtable.h" #include "yb/master/yql_auth_resource_role_permissions_index.h" #include "yb/master/yql_auth_role_permissions_vtable.h" #include "yb/master/yql_auth_roles_vtable.h" #include "yb/master/yql_columns_vtable.h" #include "yb/master/yql_empty_vtable.h" #include "yb/master/yql_functions_vtable.h" #include "yb/master/yql_indexes_vtable.h" #include "yb/master/yql_keyspaces_vtable.h" #include "yb/master/yql_local_vtable.h" #include "yb/master/yql_partitions_vtable.h" #include "yb/master/yql_peers_vtable.h" #include "yb/master/yql_size_estimates_vtable.h" #include "yb/master/yql_tables_vtable.h" #include "yb/master/yql_triggers_vtable.h" #include "yb/master/yql_types_vtable.h" #include "yb/master/yql_views_vtable.h" #include "yb/tserver/ts_tablet_manager.h" #include "yb/tablet/operations/change_metadata_operation.h" #include "yb/tablet/tablet.h" #include "yb/tablet/tablet_metadata.h" #include "yb/tablet/tablet_retention_policy.h" #include "yb/tserver/tserver_admin.proxy.h" #include "yb/util/crypt.h" #include 
"yb/util/debug-util.h"
#include "yb/util/debug/trace_event.h"
#include "yb/util/flag_tags.h"
#include "yb/util/logging.h"
#include "yb/util/math_util.h"
#include "yb/util/monotime.h"
#include "yb/util/random_util.h"
#include "yb/util/rw_mutex.h"
#include "yb/util/scope_exit.h"
#include "yb/util/size_literals.h"
#include "yb/util/status.h"
#include "yb/util/stopwatch.h"
#include "yb/util/sync_point.h"
#include "yb/util/thread.h"
#include "yb/util/thread_restrictions.h"
#include "yb/util/threadpool.h"
#include "yb/util/trace.h"
#include "yb/util/tsan_util.h"
#include "yb/util/uuid.h"
#include "yb/client/client.h"
#include "yb/client/client-internal.h"
#include "yb/client/meta_cache.h"
#include "yb/client/table_creator.h"
#include "yb/client/table_handle.h"
#include "yb/client/yb_table_name.h"
#include "yb/tserver/remote_bootstrap_client.h"
#include "yb/tserver/remote_bootstrap_snapshots.h"
#include "yb/yql/redis/redisserver/redis_constants.h"
#include "yb/yql/pgwrapper/pg_wrapper.h"
#include "yb/util/shared_lock.h"

using namespace std::literals;
using namespace yb::size_literals;

// ------------------------------------------------------------------------------------------------
// Master <-> tserver RPC and tablet-creation timeouts.
// ------------------------------------------------------------------------------------------------
DEFINE_int32(master_ts_rpc_timeout_ms, 30 * 1000,  // 30 sec
             "Timeout used for the Master->TS async rpc calls.");
TAG_FLAG(master_ts_rpc_timeout_ms, advanced);
DEFINE_int32(tablet_creation_timeout_ms, 30 * 1000,  // 30 sec
             "Timeout used by the master when attempting to create tablet "
             "replicas during table creation.");
TAG_FLAG(tablet_creation_timeout_ms, advanced);
DEFINE_test_flag(bool, disable_tablet_deletion, false,
                 "Whether catalog manager should disable tablet deletion.");
DEFINE_bool(catalog_manager_wait_for_new_tablets_to_elect_leader, true,
            "Whether the catalog manager should wait for a newly created tablet to "
            "elect a leader before considering it successfully created. "
            "This is disabled in some tests where we explicitly manage leader "
            "election.");
TAG_FLAG(catalog_manager_wait_for_new_tablets_to_elect_leader, hidden);
DEFINE_int32(catalog_manager_inject_latency_in_delete_table_ms, 0,
             "Number of milliseconds that the master will sleep in DeleteTable.");
TAG_FLAG(catalog_manager_inject_latency_in_delete_table_ms, hidden);
DECLARE_int32(catalog_manager_bg_task_wait_ms);

// ------------------------------------------------------------------------------------------------
// Replication / table-creation sizing.
// ------------------------------------------------------------------------------------------------
DEFINE_int32(replication_factor, 3,
             "Default number of replicas for tables that do not have the num_replicas set.");
TAG_FLAG(replication_factor, advanced);
DEFINE_int32(max_create_tablets_per_ts, 50,
             "The number of tablets per TS that can be requested for a new table.");
TAG_FLAG(max_create_tablets_per_ts, advanced);
DEFINE_int32(catalog_manager_report_batch_size, 1,
             "The max number of tablets evaluated in the heartbeat as a single SysCatalog update.");
TAG_FLAG(catalog_manager_report_batch_size, advanced);
DEFINE_int32(master_failover_catchup_timeout_ms, 30 * 1000 * yb::kTimeMultiplier,  // 30 sec
             "Amount of time to give a newly-elected leader master to load"
             " the previous master's metadata and become active. If this time"
             " is exceeded, the node crashes.");
TAG_FLAG(master_failover_catchup_timeout_ms, advanced);
TAG_FLAG(master_failover_catchup_timeout_ms, experimental);
DEFINE_bool(master_tombstone_evicted_tablet_replicas, true,
            "Whether the Master should tombstone (delete) tablet replicas that "
            "are no longer part of the latest reported raft config.");
TAG_FLAG(master_tombstone_evicted_tablet_replicas, hidden);
DECLARE_bool(master_ignore_deleted_on_load);

// Temporary. Can be removed after long-run testing.
DEFINE_bool(master_ignore_stale_cstate, true,
            "Whether Master processes the raft config when the version is lower.");
TAG_FLAG(master_ignore_stale_cstate, hidden);
DEFINE_bool(catalog_manager_check_ts_count_for_create_table, true,
            "Whether the master should ensure that there are enough live tablet "
            "servers to satisfy the provided replication count before allowing "
            "a table to be created.");
TAG_FLAG(catalog_manager_check_ts_count_for_create_table, hidden);

// ------------------------------------------------------------------------------------------------
// Cluster-level gauges for live/dead tservers.
// ------------------------------------------------------------------------------------------------
METRIC_DEFINE_gauge_uint32(cluster, num_tablet_servers_live,
                           "Number of live tservers in the cluster", yb::MetricUnit::kUnits,
                           "The number of tablet servers that have responded or done a heartbeat "
                           "in the time interval defined by the gflag "
                           "FLAGS_tserver_unresponsive_timeout_ms.");
METRIC_DEFINE_gauge_uint32(cluster, num_tablet_servers_dead,
                           "Number of dead tservers in the cluster", yb::MetricUnit::kUnits,
                           "The number of tablet servers that have not responded or done a "
                           "heartbeat in the time interval defined by the gflag "
                           "FLAGS_tserver_unresponsive_timeout_ms.");

// ------------------------------------------------------------------------------------------------
// Test-only fault-injection flags.
// ------------------------------------------------------------------------------------------------
DEFINE_test_flag(uint64, inject_latency_during_remote_bootstrap_secs, 0,
                 "Number of seconds to sleep during a remote bootstrap.");
DEFINE_test_flag(uint64, inject_latency_during_tablet_report_ms, 0,
                 "Number of milliseconds to sleep during the processing of a tablet batch.");
DEFINE_test_flag(bool, catalog_manager_simulate_system_table_create_failure, false,
                 "This is only used in tests to simulate a failure where the table information is "
                 "persisted in syscatalog, but the tablet information is not yet persisted and "
                 "there is a failure.");
DEFINE_string(cluster_uuid, "", "Cluster UUID to be used by this cluster");
TAG_FLAG(cluster_uuid, hidden);
DECLARE_int32(yb_num_shards_per_tserver);
DEFINE_uint64(transaction_table_num_tablets, 0,
              "Number of tablets to use when creating the transaction status table."
              "0 to use the same default num tablets as for regular tables.");
DEFINE_bool(master_enable_metrics_snapshotter, false, "Should metrics snapshotter be enabled");
DEFINE_uint64(metrics_snapshots_table_num_tablets, 0,
              "Number of tablets to use when creating the metrics snapshots table."
              "0 to use the same default num tablets as for regular tables.");

// ------------------------------------------------------------------------------------------------
// Index backfill kill switches.
// ------------------------------------------------------------------------------------------------
DEFINE_bool(disable_index_backfill, false,
            "A kill switch to disable multi-stage backfill for YCQL indexes.");
TAG_FLAG(disable_index_backfill, runtime);
TAG_FLAG(disable_index_backfill, hidden);
DEFINE_bool(disable_index_backfill_for_non_txn_tables, true,
            "A kill switch to disable multi-stage backfill for user enforced YCQL indexes. "
            "Note that enabling this feature may cause the create index flow to be slow. "
            "This is needed to ensure the safety of the index backfill process. See also "
            "index_backfill_upperbound_for_user_enforced_txn_duration_ms");
TAG_FLAG(disable_index_backfill_for_non_txn_tables, runtime);
TAG_FLAG(disable_index_backfill_for_non_txn_tables, hidden);
DEFINE_bool(enable_transactional_ddl_gc, true,
            "A kill switch for transactional DDL GC. Temporary safety measure.");
TAG_FLAG(enable_transactional_ddl_gc, runtime);
TAG_FLAG(enable_transactional_ddl_gc, hidden);
DEFINE_bool(
    hide_pg_catalog_table_creation_logs, false,
    "Whether to hide detailed log messages for PostgreSQL catalog table creation. "
    "This cuts down test logs significantly.");
TAG_FLAG(hide_pg_catalog_table_creation_logs, hidden);
DEFINE_test_flag(int32, simulate_slow_table_create_secs, 0,
                 "Simulates a slow table creation by sleeping after the table has been added to memory.");
DEFINE_test_flag(int32, simulate_slow_system_tablet_bootstrap_secs, 0,
                 "Simulates a slow tablet bootstrap by adding a sleep before system tablet init.");
DEFINE_test_flag(bool, return_error_if_namespace_not_found, false,
                 "Return an error from ListTables if a namespace id is not found in the map");
DEFINE_test_flag(bool, hang_on_namespace_transition, false,
                 "Used in tests to simulate a lapse between issuing a namespace op and final processing.");
DEFINE_test_flag(bool, simulate_crash_after_table_marked_deleting, false,
                 "Crash yb-master after table's state is set to DELETING. This skips tablets deletion.");
DEFINE_bool(master_drop_table_after_task_response, true,
            "Mark a table as DELETED as soon as we get all the responses from all the TS.");
TAG_FLAG(master_drop_table_after_task_response, advanced);
TAG_FLAG(master_drop_table_after_task_response, runtime);
DECLARE_int32(yb_client_admin_operation_timeout_sec);
DEFINE_test_flag(bool, tablegroup_master_only, false,
                 "This is only for MasterTest to be able to test tablegroups without the"
                 " transaction status table being created.");
DEFINE_bool(enable_register_ts_from_raft, true,
            "Whether to register a tserver from the consensus "
            "information of a reported tablet.");
DECLARE_int32(tserver_unresponsive_timeout_ms);
DEFINE_bool(use_create_table_leader_hint, true,
            "Whether the Master should hint which replica for each tablet should "
            "be leader initially on tablet creation.");
TAG_FLAG(use_create_table_leader_hint, runtime);
DEFINE_test_flag(bool, create_table_leader_hint_min_lexicographic, false,
                 "Whether the Master should hint replica with smallest lexicographic rank for each "
                 "tablet as leader initially on tablet creation.");

// ------------------------------------------------------------------------------------------------
// Tablet splitting and heartbeat tuning.
// ------------------------------------------------------------------------------------------------
DEFINE_int32(tablet_split_limit_per_table, 256,
             "Limit of the number of tablets per table for tablet splitting. Limitation is "
             "disabled if this value is set to 0.");
DEFINE_double(heartbeat_safe_deadline_ratio, .20,
              "When the heartbeat deadline has this percentage of time remaining, "
              "the master should halt tablet report processing so it can respond in time.");
DECLARE_int32(heartbeat_rpc_timeout_ms);
DECLARE_CAPABILITY(TabletReportLimit);
DEFINE_int32(partitions_vtable_cache_refresh_secs, 0,
             "Amount of time to wait before refreshing the system.partitions cached vtable.");
// NOTE(review): "atleast" in the help string below is a typo ("at least"); left as-is because
// help text is user-visible flag output.
DEFINE_int32(txn_table_wait_min_ts_count, 1,
             "Minimum Number of TS to wait for before creating the transaction status table."
             " Default value is 1. We wait for atleast --replication_factor if this value"
             " is smaller than that");
TAG_FLAG(txn_table_wait_min_ts_count, advanced);
DEFINE_bool(enable_ysql_tablespaces_for_placement, true,
            "If set, tablespaces will be used for placement of YSQL tables.");
TAG_FLAG(enable_ysql_tablespaces_for_placement, runtime);
DEFINE_int32(ysql_tablespace_info_refresh_secs, 30,
             "Frequency at which the table to tablespace information will be updated in master "
             "from pg catalog tables. A value of -1 disables the refresh task.");
TAG_FLAG(ysql_tablespace_info_refresh_secs, runtime);
DEFINE_int64(tablet_split_size_threshold_bytes, 0,
             "DEPRECATED -- Threshold on tablet size after which tablet should be split. Automated "
             "splitting is disabled if this value is set to 0.");
TAG_FLAG(tablet_split_size_threshold_bytes, hidden);
DEFINE_int64(tablet_split_low_phase_shard_count_per_node, 1,
             "The per-node tablet count until which a table is splitting at the phase 1 threshold, "
             "as defined by tablet_split_low_phase_size_threshold_bytes.");
DEFINE_int64(tablet_split_high_phase_shard_count_per_node, 32,
             "The per-node tablet count until which a table is splitting at the phase 2 threshold, "
             "as defined by tablet_split_high_phase_size_threshold_bytes.");
DEFINE_int64(tablet_split_low_phase_size_threshold_bytes, 1_GB,
             "The tablet size threshold at which to split tablets in phase 1. "
             "See tablet_split_low_phase_shard_count_per_node.");
DEFINE_int64(tablet_split_high_phase_size_threshold_bytes, 10_GB,
             "The tablet size threshold at which to split tablets in phase 2. "
             "See tablet_split_high_phase_shard_count_per_node.");
DEFINE_int64(tablet_force_split_threshold_bytes, 50_GB,
             "The tablet size threshold at which to split tablets regardless of how many tablets "
             "exist in the table already. This should be configured to prevent runaway whale "
             "tablets from forming in your cluster even if both automatic splitting phases have "
             "been finished.");
DEFINE_test_flag(bool, crash_server_on_sys_catalog_leader_affinity_move, false,
                 "When set, crash the master process if it performs a sys catalog leader affinity "
                 "move.");
DEFINE_int32(blacklist_progress_initial_delay_secs, yb::master::kDelayAfterFailoverSecs,
             "When a master leader failsover, the time until which the progress of load movement "
             "off the blacklisted tservers is reported as 0. This initial delay "
             "gives sufficient time for heartbeats so that we don't report"
             " a premature incorrect completion.");
TAG_FLAG(blacklist_progress_initial_delay_secs, runtime);
DEFINE_test_flag(bool, validate_all_tablet_candidates, false,
                 "When set to true, consider any tablet a valid candidate for splitting. "
                 "Specifically this flag ensures that ValidateSplitCandidate always returns OK and "
                 "all tablets are considered valid candidates for splitting.");
DEFINE_test_flag(bool, select_all_tablets_for_split, false,
                 "When set to true, select all validated processed tablets for split. Specifically "
                 "this flag ensures that ShouldSplitValidTablet always returns true.");
DEFINE_test_flag(bool, skip_placement_validation_createtable_api, false,
                 "When set, it skips checking that all the tablets of a table have enough tservers"
                 " conforming to the table placement policy during CreateTable API call.");
// NOTE(review): DEFINE_test_flag registers the flag under a TEST_ prefix, hence the prefixed
// name here — confirm against flag_tags.h / test flag macros.
TAG_FLAG(TEST_skip_placement_validation_createtable_api, runtime);
DEFINE_bool(enable_tablet_split_of_pitr_tables, true,
            "When set, it enables automatic tablet splitting of tables covered by "
            "Point In Time Restore schedules.");
TAG_FLAG(enable_tablet_split_of_pitr_tables, runtime);
DEFINE_bool(enable_tablet_split_of_xcluster_replicated_tables, false,
            "When set, it enables automatic tablet splitting for tables that are part of an "
            "xCluster replication setup");
TAG_FLAG(enable_tablet_split_of_xcluster_replicated_tables, runtime);
TAG_FLAG(enable_tablet_split_of_xcluster_replicated_tables, hidden);
DEFINE_test_flag(int32, slowdown_alter_table_rpcs_ms, 0,
                 "Slows down the alter table rpc's send and response handler so that the TServer "
                 "has a heartbeat delay and triggers tablet leader change.");
DEFINE_test_flag(bool, reject_delete_not_serving_tablet_rpc, false,
                 "Whether to reject DeleteNotServingTablet RPC.");

namespace yb {
namespace master {

using std::atomic;
using std::shared_ptr;
using std::string;
using std::unique_ptr;
using std::vector;

using namespace std::placeholders;

using base::subtle::NoBarrier_Load;
using base::subtle::NoBarrier_CompareAndSwap;
using consensus::kMinimumTerm;
using consensus::CONSENSUS_CONFIG_COMMITTED;
using consensus::CONSENSUS_CONFIG_ACTIVE;
using consensus::COMMITTED_OPID;
using consensus::Consensus;
using consensus::ConsensusMetadata;
using
consensus::ConsensusServiceProxy;
using consensus::ConsensusStatePB;
using consensus::GetConsensusRole;
using consensus::RaftPeerPB;
using consensus::StartRemoteBootstrapRequestPB;
using rpc::RpcContext;
using strings::Substitute;
using tablet::TABLET_DATA_COPYING;
using tablet::TABLET_DATA_DELETED;
using tablet::TABLET_DATA_READY;
using tablet::TABLET_DATA_TOMBSTONED;
using tablet::TabletDataState;
using tablet::RaftGroupMetadata;
using tablet::RaftGroupMetadataPtr;
using tablet::TabletPeer;
using tablet::RaftGroupStatePB;
using tablet::TabletStatusListener;
using tablet::TabletStatusPB;
using tserver::HandleReplacingStaleTablet;
using tserver::TabletServerErrorPB;
using master::MasterServiceProxy;
using yb::pgwrapper::PgWrapper;
using yb::server::MasterAddressesToString;

using yb::client::YBClient;
using yb::client::YBClientBuilder;
using yb::client::YBColumnSchema;
using yb::client::YBSchema;
using yb::client::YBSchemaBuilder;
using yb::client::YBTable;
using yb::client::YBTableCreator;
using yb::client::YBTableName;

namespace {

// Macros to access index information in CATALOG.
//
// NOTES from file master.proto for SysTablesEntryPB.
// - For index table: [to be deprecated and replaced by "index_info"]
//     optional bytes indexed_table_id = 13; // Indexed table id of this index.
//     optional bool is_local_index = 14 [ default = false ]; // Whether this is a local index.
//     optional bool is_unique_index = 15 [ default = false ]; // Whether this is a unique index.
// - During transition period, we have to consider both fields and the following macros help
//   avoiding duplicate protobuf version check thru out our code.

// Prefers the new index_info sub-message; falls back to the deprecated top-level field.
const std::string& GetIndexedTableId(const SysTablesEntryPB& pb) {
  return pb.has_index_info() ? pb.index_info().indexed_table_id() : pb.indexed_table_id();
}

#define PROTO_GET_IS_LOCAL(tabpb) \
  (tabpb.has_index_info() ? tabpb.index_info().is_local() \
                          : tabpb.is_local_index())

#define PROTO_GET_IS_UNIQUE(tabpb) \
  (tabpb.has_index_info() ? tabpb.index_info().is_unique() \
                          : tabpb.is_unique_index())

// An entry is an index if either the new or the deprecated field is populated.
template <class PB>
bool IsIndex(const PB& pb) {
  return pb.has_index_info() || !pb.indexed_table_id().empty();
}

bool IsTable(const SysTablesEntryPB& pb) {
  return !IsIndex(pb);
}

#define PROTO_PTR_IS_INDEX(tabpb) \
  (tabpb->has_index_info() || !tabpb->indexed_table_id().empty())

#define PROTO_PTR_IS_TABLE(tabpb) \
  (!tabpb->has_index_info() && tabpb->indexed_table_id().empty())

#if (0)
// Once the deprecated fields are obsolete, the above macros should be defined as the following.
#define GetIndexedTableId(tabpb) (tabpb.index_info().indexed_table_id())
#define PROTO_GET_IS_LOCAL(tabpb) (tabpb.index_info().is_local())
#define PROTO_GET_IS_UNIQUE(tabpb) (tabpb.index_info().is_unique())
#define PROTO_IS_INDEX(tabpb) (tabpb.has_index_info())
#define PROTO_IS_TABLE(tabpb) (!tabpb.has_index_info())
#define PROTO_PTR_IS_INDEX(tabpb) (tabpb->has_index_info())
#define PROTO_PTR_IS_TABLE(tabpb) (!tabpb->has_index_info())
#endif

// Incrementally fills an IndexInfoPB (held by reference) with the index's properties and the
// column mapping between the index schema and the indexed table's schema.
class IndexInfoBuilder {
 public:
  explicit IndexInfoBuilder(IndexInfoPB* index_info) : index_info_(*index_info) {
    DVLOG(3) << " After " << __PRETTY_FUNCTION__ << " index_info_ is " << yb::ToString(index_info_);
  }

  // Sets the indexed table id and the local/unique properties; version starts at 0.
  void ApplyProperties(const TableId& indexed_table_id, bool is_local, bool is_unique) {
    index_info_.set_indexed_table_id(indexed_table_id);
    index_info_.set_version(0);
    index_info_.set_is_local(is_local);
    index_info_.set_is_unique(is_unique);
    DVLOG(3) << " After " << __PRETTY_FUNCTION__ << " index_info_ is " << yb::ToString(index_info_);
  }

  // Maps every index column (by name) to its column id in the indexed table, and records the
  // hash/range key layout of both schemas. Fails if an index column is absent from the
  // indexed table.
  CHECKED_STATUS ApplyColumnMapping(const Schema& indexed_schema, const Schema& index_schema) {
    for (size_t i = 0; i < index_schema.num_columns(); i++) {
      const auto& col_name = index_schema.column(i).name();
      const auto indexed_col_idx = indexed_schema.find_column(col_name);
      if (PREDICT_FALSE(indexed_col_idx == Schema::kColumnNotFound)) {
        return STATUS(NotFound, "The indexed table column does not exist", col_name);
      }
      auto* col = index_info_.add_columns();
      col->set_column_id(index_schema.column_id(i));
      col->set_indexed_column_id(indexed_schema.column_id(indexed_col_idx));
    }
    index_info_.set_hash_column_count(index_schema.num_hash_key_columns());
    index_info_.set_range_column_count(index_schema.num_range_key_columns());
    // Key columns of the indexed table: [0, num_hash_key_columns) are hash columns,
    // [num_hash_key_columns, num_key_columns) are range columns.
    for (size_t i = 0; i < indexed_schema.num_hash_key_columns(); i++) {
      index_info_.add_indexed_hash_column_ids(indexed_schema.column_id(i));
    }
    for (size_t i = indexed_schema.num_hash_key_columns(); i < indexed_schema.num_key_columns(); i++) {
      index_info_.add_indexed_range_column_ids(indexed_schema.column_id(i));
    }
    DVLOG(3) << " After " << __PRETTY_FUNCTION__ << " index_info_ is " << yb::ToString(index_info_);
    return Status::OK();
  }

 private:
  IndexInfoPB& index_info_;
};

// Fills resp with OBJECT_NOT_FOUND when the locked catalog entity is deleted/deleting or not
// yet visible to clients; returns the corresponding error status, or OK if usable.
template<class Lock, class RespClass>
Status CheckIfTableDeletedOrNotVisibleToClient(const Lock& lock, RespClass* resp) {
  // This covers both in progress and fully deleted objects.
  if (lock->started_deleting()) {
    Status s = STATUS_SUBSTITUTE(NotFound,
        "The object '$0.$1' does not exist", lock->namespace_id(), lock->name());
    return SetupError(resp->mutable_error(), MasterErrorPB::OBJECT_NOT_FOUND, s);
  }
  if (!lock->visible_to_client()) {
    Status s = STATUS_SUBSTITUTE(ServiceUnavailable,
        "The object '$0.$1' is not running", lock->namespace_id(), lock->name());
    return SetupError(resp->mutable_error(), MasterErrorPB::OBJECT_NOT_FOUND, s);
  }
  return Status::OK();
}

#define VERIFY_NAMESPACE_FOUND(expr, resp) RESULT_CHECKER_HELPER( \
    expr, \
    if (!__result.ok()) { \
      return SetupError((resp)->mutable_error(), __result.status()); \
    });

// Maps a namespace lifecycle state to the master error code returned to clients:
// transitional states are retryable, terminal/steady states are internal errors.
MasterErrorPB_Code NamespaceMasterError(SysNamespaceEntryPB_State state) {
  switch (state) {
    case SysNamespaceEntryPB::PREPARING: FALLTHROUGH_INTENDED;
    case SysNamespaceEntryPB::DELETING:
      return MasterErrorPB::IN_TRANSITION_CAN_RETRY;
    case SysNamespaceEntryPB::DELETED: FALLTHROUGH_INTENDED;
    case SysNamespaceEntryPB::FAILED: FALLTHROUGH_INTENDED;
    case SysNamespaceEntryPB::RUNNING:
      return MasterErrorPB::INTERNAL_ERROR;
    default:
      FATAL_INVALID_ENUM_VALUE(SysNamespaceEntryPB_State, state);
  }
}

// Index of the per-database-type map inside NamespaceNameMapper::typed_maps_.
// Slot 0 is reserved for the unknown type (reaching it is a CHECK failure).
size_t GetNameMapperIndex(YQLDatabase db_type) {
  switch (db_type) {
    case YQL_DATABASE_UNKNOWN: break;
    case YQL_DATABASE_CQL: return 1;
    case YQL_DATABASE_PGSQL: return 2;
    case YQL_DATABASE_REDIS: return 3;
  }
  CHECK(false) << "Unexpected db type " << db_type;
  return 0;
}

// Index backfill is enabled unless the relevant kill-switch flag disables it; YSQL and YCQL
// (transactional vs user-enforced) are governed by different flags.
bool IsIndexBackfillEnabled(TableType table_type, bool is_transactional) {
  // Fetch the runtime flag to prevent any issues from the updates to flag while processing.
  const bool disabled = (table_type == PGSQL_TABLE_TYPE
      ? GetAtomicFlag(&FLAGS_ysql_disable_index_backfill)
      : GetAtomicFlag(&FLAGS_disable_index_backfill) ||
        (!is_transactional && GetAtomicFlag(&FLAGS_disable_index_backfill_for_non_txn_tables)));
  return !disabled;
}

constexpr auto kDefaultYQLPartitionsRefreshBgTaskSleep = 10s;

// Appends the id of every snapshot schedule that covers table_id (schedule id lists are
// expected sorted — binary_search is used) to retained_by_snapshot_schedules.
void FillRetainedBySnapshotSchedules(
      const SnapshotSchedulesToObjectIdsMap& schedules_to_tables_map,
      const TableId& table_id,
      RepeatedBytes* retained_by_snapshot_schedules) {
  for (const auto& entry : schedules_to_tables_map) {
    if (std::binary_search(entry.second.begin(), entry.second.end(), table_id)) {
      retained_by_snapshot_schedules->Add()->assign(
          entry.first.AsSlice().cdata(), entry.first.size());
    }
  }
}

}  // anonymous namespace

////////////////////////////////////////////////////////////
// CatalogManager
////////////////////////////////////////////////////////////

CatalogManager::NamespaceInfoMap& CatalogManager::NamespaceNameMapper::operator[](
    YQLDatabase db_type) {
  return typed_maps_[GetNameMapperIndex(db_type)];
}

const CatalogManager::NamespaceInfoMap& CatalogManager::NamespaceNameMapper::operator[](
    YQLDatabase db_type) const {
  return typed_maps_[GetNameMapperIndex(db_type)];
}

void CatalogManager::NamespaceNameMapper::clear() {
  for (auto& m : typed_maps_) {
    m.clear();
  }
}

// Constructs the catalog manager and its helper components (load balancer, permissions,
// task trackers, encryption manager, thread pools). The sys catalog table is only created
// when a Master instance is supplied.
CatalogManager::CatalogManager(Master* master)
    : master_(master),
      rng_(GetRandomSeed32()),
      tablet_exists_(false),
      state_(kConstructed),
      leader_ready_term_(-1),
      leader_lock_(RWMutex::Priority::PREFER_WRITING),
      load_balance_policy_(std::make_unique<ClusterLoadBalancer>(this)),
      permissions_manager_(std::make_unique<PermissionsManager>(this)),
      tasks_tracker_(new TasksTracker(IsUserInitiated::kFalse)),
      jobs_tracker_(new TasksTracker(IsUserInitiated::kTrue)),
      encryption_manager_(new EncryptionManager()),
      ysql_transaction_(this, master_),
      tablespace_manager_(std::make_shared<YsqlTablespaceManager>(nullptr, nullptr)),
      tablespace_bg_task_running_(false),
      tablet_split_manager_(this, this) {
  yb::InitCommonFlags();
  CHECK_OK(ThreadPoolBuilder("leader-initialization")
           .set_max_threads(1)
           .Build(&leader_initialization_pool_));
  CHECK_OK(ThreadPoolBuilder("CatalogManagerBGTasks").Build(&background_tasks_thread_pool_));
  ysql_transaction_.set_thread_pool(background_tasks_thread_pool_.get());
  CHECK_OK(ThreadPoolBuilder("async-tasks")
           .Build(&async_task_pool_));
  if (master_) {
    sys_catalog_.reset(new SysCatalogTable(
        master_, master_->metric_registry(),
        Bind(&CatalogManager::ElectedAsLeaderCb, Unretained(this))));
  }
}

CatalogManager::~CatalogManager() {
  if (StartShutdown()) {
    CompleteShutdown();
  }
}

// Transitions kConstructed -> kStarting -> kRunning: registers metrics, kicks off async sys
// catalog init, waits for the catalog tablet (unless in shell mode), and starts background tasks.
Status CatalogManager::Init() {
  {
    std::lock_guard<simple_spinlock> l(state_lock_);
    CHECK_EQ(kConstructed, state_);
    state_ = kStarting;
  }

  // Initialize the metrics emitted by the catalog manager.
  metric_num_tablet_servers_live_ =
    METRIC_num_tablet_servers_live.Instantiate(master_->metric_entity_cluster(), 0);

  metric_num_tablet_servers_dead_ =
    METRIC_num_tablet_servers_dead.Instantiate(master_->metric_entity_cluster(), 0);

  RETURN_NOT_OK_PREPEND(InitSysCatalogAsync(),
                        "Failed to initialize sys tables async");

  if (PREDICT_FALSE(FLAGS_TEST_simulate_slow_system_tablet_bootstrap_secs > 0)) {
    LOG_WITH_PREFIX(INFO) << "Simulating slow system tablet bootstrap";
    SleepFor(MonoDelta::FromSeconds(FLAGS_TEST_simulate_slow_system_tablet_bootstrap_secs));
  }

  // WaitUntilRunning() must run outside of the lock as to prevent
  // deadlock. This is safe as WaitUntilRunning waits for another
  // thread to finish its work and doesn't itself depend on any state
  // within CatalogManager. Need not start sys catalog or background tasks
  // when we are started in shell mode.
  if (!master_->opts().IsShellMode()) {
    RETURN_NOT_OK_PREPEND(sys_catalog_->WaitUntilRunning(),
                          "Failed waiting for the catalog tablet to run");
    std::vector<consensus::RaftPeerPB> masters_raft;
    RETURN_NOT_OK(master_->ListRaftConfigMasters(&masters_raft));
    HostPortSet hps;
    for (const auto& peer : masters_raft) {
      // Skip ourselves; only remote peers can supply universe keys.
      if (master_->instance_pb().permanent_uuid() == peer.permanent_uuid()) {
        continue;
      }
      HostPort hp = HostPortFromPB(DesiredHostPort(peer, master_->MakeCloudInfoPB()));
      hps.insert(hp);
    }
    RETURN_NOT_OK(encryption_manager_->AddPeersToGetUniverseKeyFrom(hps));
    RETURN_NOT_OK(EnableBgTasks());
  }

  RETURN_NOT_OK_PREPEND(tablet_split_manager_.Init(), "Failed to initialize tablet split manager.");

  // Cache the server registration even for shell mode masters. See
  // https://github.com/yugabyte/yugabyte-db/issues/8065.
  RETURN_NOT_OK(GetRegistration(&server_registration_));

  {
    std::lock_guard<simple_spinlock> l(state_lock_);
    CHECK_EQ(kStarting, state_);
    state_ = kRunning;
  }

  Started();

  return Status::OK();
}

// Stub: encryption changes are only implemented in the enterprise build.
Status CatalogManager::ChangeEncryptionInfo(const ChangeEncryptionInfoRequestPB* req,
                                            ChangeEncryptionInfoResponsePB* resp) {
  return STATUS(InvalidCommand, "Command only supported in enterprise build.");
}

// Raft callback invoked when this master becomes leader; defers the (potentially slow) sys
// catalog load to the single-threaded leader-initialization pool.
Status CatalogManager::ElectedAsLeaderCb() {
  time_elected_leader_ = MonoTime::Now();
  return leader_initialization_pool_->SubmitClosure(
      Bind(&CatalogManager::LoadSysCatalogDataTask, Unretained(this)));
}

// Verifies we are still the active leader, then waits (up to timeout) for all pending
// operations to commit and for our leader lease to become valid.
Status CatalogManager::WaitUntilCaughtUpAsLeader(const MonoDelta& timeout) {
  string uuid = master_->fs_manager()->uuid();
  Consensus* consensus = tablet_peer()->consensus();
  ConsensusStatePB cstate = consensus->ConsensusState(CONSENSUS_CONFIG_ACTIVE);
  if (!cstate.has_leader_uuid() || cstate.leader_uuid() != uuid) {
    return STATUS_SUBSTITUTE(IllegalState,
        "Node $0 not leader. Consensus state: $1", uuid, cstate.ShortDebugString());
  }

  // Wait for all transactions to be committed.
  const CoarseTimePoint deadline = CoarseMonoClock::now() + timeout;
  {
    tablet::HistoryCutoffPropagationDisabler disabler(tablet_peer()->tablet()->RetentionPolicy());
    RETURN_NOT_OK(tablet_peer()->operation_tracker()->WaitForAllToFinish(timeout));
  }

  RETURN_NOT_OK(tablet_peer()->consensus()->WaitForLeaderLeaseImprecise(deadline));
  return Status::OK();
}

// Runs on the leader-initialization pool after election: catches up with the previous leader's
// metadata and loads the sys catalog into memory for the current term.
void CatalogManager::LoadSysCatalogDataTask() {
  auto consensus = tablet_peer()->shared_consensus();
  const int64_t term = consensus->ConsensusState(CONSENSUS_CONFIG_ACTIVE).current_term();
  Status s = WaitUntilCaughtUpAsLeader(
      MonoDelta::FromMilliseconds(FLAGS_master_failover_catchup_timeout_ms));

  int64_t term_after_wait = consensus->ConsensusState(CONSENSUS_CONFIG_ACTIVE).current_term();
  if (term_after_wait != term) {
    // If we got elected leader again while waiting to catch up then we will get another callback to
    // update state from sys_catalog, so bail now.
    //
    // If we failed when waiting, i.e. could not acquire a leader lease, this could be due to us
    // becoming a follower. If we're not partitioned away, we'll know about a new term soon.
    LOG_WITH_PREFIX(INFO)
        << "Term change from " << term << " to " << term_after_wait
        << " while waiting for master leader catchup. Not loading sys catalog metadata. "
        << "Status of waiting: " << s;
    return;
  }

  if (!s.ok()) {
    // This could happen e.g. if we are a partitioned-away leader that failed to acquire a leader
    // lease.
    //
    // TODO: handle this cleanly by transitioning to a follower without crashing.
    LOG_WITH_PREFIX(WARNING) << "Failed waiting for node to catch up after master election: " << s;

    if (s.IsTimedOut()) {
      // FATAL intentionally terminates the process: serving stale catalog data
      // as leader is considered worse than restarting this master.
      LOG_WITH_PREFIX(FATAL) << "Shutting down due to unavailability of other masters after"
                             << " election. TODO: Abdicate instead.";
    }
    return;
  }

  LOG_WITH_PREFIX(INFO) << "Loading table and tablet metadata into memory for term " << term;
  LOG_SLOW_EXECUTION(WARNING, 1000, LogPrefix() + "Loading metadata into memory") {
    Status status = VisitSysCatalog(term);
    if (!status.ok()) {
      {
        // A load failure during shutdown is expected; just log and return.
        std::lock_guard<simple_spinlock> l(state_lock_);
        if (state_ == kClosing) {
          LOG_WITH_PREFIX(INFO)
              << "Error loading sys catalog; because shutdown is in progress. term " << term
              << " status : " << status;
          return;
        }
      }
      // A load failure caused by losing leadership (term changed) is also
      // benign — the new leader's task will do the load.
      auto new_term = consensus->ConsensusState(CONSENSUS_CONFIG_ACTIVE).current_term();
      if (new_term != term) {
        LOG_WITH_PREFIX(INFO)
            << "Error loading sys catalog; but that's OK as term was changed from " << term
            << " to " << new_term << ": " << status;
        return;
      }
      // Still leader for the same term but the load failed: unrecoverable.
      LOG_WITH_PREFIX(FATAL) << "Failed to load sys catalog: " << status;
    }
  }

  // Publish the term we are ready to serve for; CheckIsLeaderAndReady compares
  // leader_ready_term_ against the current consensus term.
  {
    std::lock_guard<simple_spinlock> l(state_lock_);
    leader_ready_term_ = term;
    LOG_WITH_PREFIX(INFO) << "Completed load of sys catalog in term " << term;
  }
  SysCatalogLoaded(term);
}

// Test helper: waits until the async task pool has drained, or returns
// TimedOut.
CHECKED_STATUS CatalogManager::WaitForWorkerPoolTests(const MonoDelta& timeout) const {
  if (!async_task_pool_->WaitFor(timeout)) {
    return STATUS(TimedOut, "Worker Pool hasn't finished processing tasks");
  }
  return Status::OK();
}

// Rebuilds the in-memory catalog state from the sys catalog tablet for the
// given leader term: aborts outstanding table tasks, clears and reloads all
// maps, optionally restores the initial YSQL snapshot, and prepares default
// namespaces/system tables/config. Takes leader_lock_ (write) and then mutex_
// — this lock order must be preserved by callers.
Status CatalogManager::VisitSysCatalog(int64_t term) {
  // Block new catalog operations, and wait for existing operations to finish.
  LOG_WITH_PREFIX(INFO)
      << __func__ << ": Wait on leader_lock_ for any existing operations to finish.";
  auto start = std::chrono::steady_clock::now();
  std::lock_guard<RWMutex> leader_lock_guard(leader_lock_);
  auto finish = std::chrono::steady_clock::now();

  static const auto kLongLockAcquisitionLimit = RegularBuildVsSanitizers(100ms, 750ms);
  if (finish > start + kLongLockAcquisitionLimit) {
    LOG_WITH_PREFIX(WARNING) << "Long wait on leader_lock_: " << yb::ToString(finish - start);
  }

  LOG_WITH_PREFIX(INFO)
      << __func__ << ": Acquire catalog manager lock_ before loading sys catalog.";
  LockGuard lock(mutex_);
  VLOG_WITH_FUNC(3) << "Acquired the catalog manager lock";

  // Abort any outstanding tasks. All TableInfos are orphaned below, so
  // it's important to end their tasks now; otherwise Shutdown() will
  // destroy master state used by these tasks.
  std::vector<scoped_refptr<TableInfo>> tables;
  AppendValuesFromMap(*table_ids_map_, &tables);
  AbortAndWaitForAllTasks(tables);

  // Clear internal maps and run data loaders.
  RETURN_NOT_OK(RunLoaders(term));

  // Prepare various default system configurations.
  RETURN_NOT_OK(PrepareDefaultSysConfig(term));

  // Restore the initial sys catalog snapshot only when YSQL is enabled, a
  // snapshot path is configured, and we are not the process *creating* the
  // snapshot.
  if ((FLAGS_use_initial_sys_catalog_snapshot || FLAGS_enable_ysql) &&
      !FLAGS_initial_sys_catalog_snapshot_path.empty() &&
      !FLAGS_create_initial_sys_catalog_snapshot) {
    // Non-empty maps after RunLoaders mean persisted state already exists, so
    // this is not a fresh cluster.
    if (!namespace_ids_map_.empty() || !system_tablets_.empty()) {
      LOG_WITH_PREFIX(INFO)
          << "This is an existing cluster, not initializing from a sys catalog snapshot.";
    } else {
      Result<bool> dir_exists =
          Env::Default()->DoesDirectoryExist(FLAGS_initial_sys_catalog_snapshot_path);
      if (dir_exists.ok() && *dir_exists) {
        bool initdb_was_already_done = false;
        {
          auto l = ysql_catalog_config_->LockForRead();
          initdb_was_already_done = l->pb.ysql_catalog_config().initdb_done();
        }
        if (initdb_was_already_done) {
          LOG_WITH_PREFIX(INFO)
              << "initdb has been run before, no need to restore sys catalog from "
              << "the initial snapshot";
        } else {
          LOG_WITH_PREFIX(INFO) << "Restoring snapshot in sys catalog";
          Status restore_status = RestoreInitialSysCatalogSnapshot(
              FLAGS_initial_sys_catalog_snapshot_path,
              sys_catalog_->tablet_peer().get(),
              term);
          if (!restore_status.ok()) {
            LOG_WITH_PREFIX(ERROR) << "Failed restoring snapshot in sys catalog";
            return restore_status;
          }

          // The restore replaced on-disk state, so rebuild the cluster config,
          // mark initdb done, and re-run the loaders over the restored data.
          LOG_WITH_PREFIX(INFO) << "Re-initializing cluster config";
          cluster_config_.reset();
          RETURN_NOT_OK(PrepareDefaultClusterConfig(term));
          LOG_WITH_PREFIX(INFO) << "Restoring snapshot completed, considering initdb finished";
          RETURN_NOT_OK(InitDbFinished(Status::OK(), term));
          RETURN_NOT_OK(RunLoaders(term));
        }
      } else {
        LOG_WITH_PREFIX(WARNING)
            << "Initial sys catalog snapshot directory does not exist: "
            << FLAGS_initial_sys_catalog_snapshot_path
            << (dir_exists.ok() ? "" : ", status: " + dir_exists.status().ToString());
      }
    }
  }

  // Create the system namespaces (created only if they don't already exist).
  RETURN_NOT_OK(PrepareDefaultNamespaces(term));

  // Create the system tables (created only if they don't already exist).
  RETURN_NOT_OK(PrepareSystemTables(term));

  // Create the default cassandra (created only if they don't already exist).
  RETURN_NOT_OK(permissions_manager_->PrepareDefaultRoles(term));

  // If this is the first time we start up, we have no config information as default. We write an
  // empty version 0.
  RETURN_NOT_OK(PrepareDefaultClusterConfig(term));

  permissions_manager_->BuildRecursiveRoles();

  if (FLAGS_enable_ysql) {
    // Number of TS to wait for before creating the txn table.
    auto wait_ts_count = std::max(FLAGS_txn_table_wait_min_ts_count, FLAGS_replication_factor);

    LOG_WITH_PREFIX(INFO)
        << "YSQL is enabled, will create the transaction status table when "
        << wait_ts_count << " tablet servers are online";
    // Deferred: the callback fires once enough tablet servers have registered.
    // NOTE(review): the lambda captures `this`; it is assumed the TS manager's
    // callback cannot outlive the CatalogManager — confirm against Shutdown.
    master_->ts_manager()->SetTSCountCallback(wait_ts_count, [this, wait_ts_count] {
      LOG_WITH_PREFIX(INFO)
          << wait_ts_count
          << " tablet servers registered, creating the transaction status table";
      // Retry table creation until it succeeds. It might fail initially because placement UUID
      // of live replicas is set through an RPC from YugaWare, and we won't be able to calculate
      // the number of primary (non-read-replica) tablet servers until that happens.
      while (true) {
        const auto s = CreateTransactionsStatusTableIfNeeded(/* rpc */ nullptr);
        if (s.ok()) {
          break;
        }
        LOG_WITH_PREFIX(WARNING) << "Failed creating transaction status table, waiting: " << s;
        if (s.IsShutdownInProgress()) {
          return;
        }
        // Stop retrying if we are no longer the leader.
        auto role = Role();
        if (role != RaftPeerPB::LEADER) {
          LOG_WITH_PREFIX(WARNING)
              << "Cancel creating transaction because of role: " << RaftPeerPB::Role_Name(role);
          return;
        }
        SleepFor(MonoDelta::FromSeconds(1));
      }
      LOG_WITH_PREFIX(INFO) << "Finished creating transaction status table asynchronously";
    });
  }

  if (!StartRunningInitDbIfNeeded(term)) {
    // If we are not running initdb, this is an existing cluster, and we need to check whether we
    // need to do a one-time migration to make YSQL system catalog tables transactional.
    RETURN_NOT_OK(MakeYsqlSysCatalogTablesTransactional(
        table_ids_map_.CheckOut().get_ptr(), sys_catalog_.get(), ysql_catalog_config_.get(),
        term));
  }
  return Status::OK();
}

// Runs one sys catalog loader of type Loader (e.g. TableLoader) and visits
// every persisted entry of that kind, populating the in-memory maps.
template <class Loader>
Status CatalogManager::Load(const std::string& title, const int64_t term) {
  LOG_WITH_PREFIX(INFO) << __func__ << ": Loading " << title << " into memory.";
  std::unique_ptr<Loader> loader = std::make_unique<Loader>(this, term);
  RETURN_NOT_OK_PREPEND(
      sys_catalog_->Visit(loader.get()),
      "Failed while visiting " + title + " in sys catalog");
  return Status::OK();
}

// Clears every in-memory catalog map/tracker and repopulates them from the
// sys catalog via the typed loaders. Caller must hold the catalog manager
// lock (invoked from VisitSysCatalog under mutex_).
Status CatalogManager::RunLoaders(int64_t term) {
  // Clear the table and tablet state.
  table_names_map_.clear();
  auto table_ids_map_checkout = table_ids_map_.CheckOut();
  table_ids_map_checkout->clear();

  auto tablet_map_checkout = tablet_map_.CheckOut();
  tablet_map_checkout->clear();

  // Clear the namespace mappings.
  namespace_ids_map_.clear();
  namespace_names_mapper_.clear();

  // Clear the type mappings.
  udtype_ids_map_.clear();
  udtype_names_map_.clear();

  // Clear the current cluster config.
  cluster_config_.reset();

  // Clear redis config mapping.
  redis_config_map_.clear();

  // Clear ysql catalog config.
  ysql_catalog_config_.reset();

  // Clear recent tasks.
  tasks_tracker_->Reset();

  // Clear recent jobs.
  jobs_tracker_->Reset();

  // Invalidate tablet reports from all live tablet servers so they resend a
  // full report to the (new) leader.
  std::vector<std::shared_ptr<TSDescriptor>> descs;
  master_->ts_manager()->GetAllDescriptors(&descs);
  for (const auto& ts_desc : descs) {
    ts_desc->set_has_tablet_report(false);
  }

  {
    LockGuard lock(permissions_manager()->mutex());

    // Clear the roles mapping.
    permissions_manager()->ClearRolesUnlocked();
    RETURN_NOT_OK(Load<RoleLoader>("roles", term));
    RETURN_NOT_OK(Load<SysConfigLoader>("sys config", term));
  }

  // Clear the hidden tablets vector.
  hidden_tablets_.clear();

  RETURN_NOT_OK(Load<TableLoader>("tables", term));
  RETURN_NOT_OK(Load<TabletLoader>("tablets", term));
  RETURN_NOT_OK(Load<NamespaceLoader>("namespaces", term));
  RETURN_NOT_OK(Load<UDTypeLoader>("user-defined types", term));
  RETURN_NOT_OK(Load<ClusterConfigLoader>("cluster configuration", term));
  RETURN_NOT_OK(Load<RedisConfigLoader>("Redis config", term));

  return Status::OK();
}

// Validates that the keyspace/table named in a GRANT/REVOKE request exists,
// filling resp->error with a Cassandra-compatible error when it does not.
Status CatalogManager::CheckResource(
    const GrantRevokePermissionRequestPB* req, GrantRevokePermissionResponsePB* resp) {
  scoped_refptr<TableInfo> table;

  // Checking if resources exist.
  if (req->resource_type() == ResourceType::TABLE ||
      req->resource_type() == ResourceType::KEYSPACE) {
    // We can't match Apache Cassandra's error because when a namespace is not provided, the error
    // is detected by the semantic analysis in PTQualifiedName::AnalyzeName.
    DCHECK(req->has_namespace_());
    const auto& namespace_info = req->namespace_();
    auto ns = FindNamespace(namespace_info);

    if (req->resource_type() == ResourceType::KEYSPACE) {
      if (!ns.ok()) {
        // Matches Apache Cassandra's error.
        Status s = STATUS_SUBSTITUTE(
            NotFound, "Resource <keyspace $0> doesn't exist", namespace_info.name());
        return SetupError(resp->mutable_error(), MasterErrorPB::NAMESPACE_NOT_FOUND, s);
      }
    } else {
      if (ns.ok()) {
        CatalogManager::SharedLock l(mutex_);
        table = FindPtrOrNull(table_names_map_, {(**ns).id(), req->resource_name()});
      }
      if (table == nullptr) {
        // Matches Apache Cassandra's error.
        Status s = STATUS_SUBSTITUTE(
            NotFound, "Resource <object '$0.$1'> doesn't exist",
            namespace_info.name(), req->resource_name());
        return SetupError(resp->mutable_error(), MasterErrorPB::OBJECT_NOT_FOUND, s);
      }
    }
  }
  return Status::OK();
}

// Creates the version-0 cluster config (including the cluster UUID) if none
// was loaded from the sys catalog; no-op when a config already exists.
Status CatalogManager::PrepareDefaultClusterConfig(int64_t term) {
  if (cluster_config_) {
    LOG_WITH_PREFIX(INFO)
        << "Cluster configuration has already been set up, skipping re-initialization.";
    return Status::OK();
  }

  // Create default.
  SysClusterConfigEntryPB config;
  config.set_version(0);

  // Cluster UUID comes from the --cluster_uuid flag when set, otherwise a
  // freshly generated random UUID.
  std::string cluster_uuid_source;
  if (!FLAGS_cluster_uuid.empty()) {
    // Validate the flag value parses as a UUID before using it.
    Uuid uuid;
    RETURN_NOT_OK(uuid.FromString(FLAGS_cluster_uuid));
    config.set_cluster_uuid(FLAGS_cluster_uuid);
    cluster_uuid_source = "from the --cluster_uuid flag";
  } else {
    auto uuid = Uuid::Generate();
    config.set_cluster_uuid(to_string(uuid));
    cluster_uuid_source = "(randomly generated)";
  }
  LOG_WITH_PREFIX(INFO)
      << "Setting cluster UUID to " << config.cluster_uuid() << " " << cluster_uuid_source;

  // Create in memory object.
  cluster_config_ = new ClusterConfigInfo();

  // Prepare write.
  auto l = cluster_config_->LockForWrite();
  l.mutable_data()->pb = std::move(config);

  // Write to sys_catalog and in memory.
  RETURN_NOT_OK(sys_catalog_->Upsert(term, cluster_config_));
  l.Commit();

  return Status::OK();
}

// Ensures the default security config and the version-0 YSQL catalog config
// exist, persisting the latter to the sys catalog when first created.
Status CatalogManager::PrepareDefaultSysConfig(int64_t term) {
  {
    LockGuard lock(permissions_manager()->mutex());
    RETURN_NOT_OK(permissions_manager()->PrepareDefaultSecurityConfigUnlocked(term));
  }

  if (!ysql_catalog_config_) {
    SysYSQLCatalogConfigEntryPB ysql_catalog_config;
    ysql_catalog_config.set_version(0);

    // Create in memory objects.
    ysql_catalog_config_ = new SysConfigInfo(kYsqlCatalogConfigType);

    // Prepare write.
    auto l = ysql_catalog_config_->LockForWrite();
    *l.mutable_data()->pb.mutable_ysql_catalog_config() = std::move(ysql_catalog_config);

    // Write to sys_catalog and in memory.
    RETURN_NOT_OK(sys_catalog_->Upsert(term, ysql_catalog_config_));
    l.Commit();
  }

  return Status::OK();
}

// Kicks off YSQL initdb asynchronously when it has not run before. Returns
// true iff the initdb task was started; the task records its outcome via
// InitDbFinished and optionally writes the initial sys catalog snapshot.
bool CatalogManager::StartRunningInitDbIfNeeded(int64_t term) {
  if (!ShouldAutoRunInitDb(ysql_catalog_config_.get(), pg_proc_exists_)) {
    return false;
  }

  string master_addresses_str = MasterAddressesToString(
      *master_->opts().GetMasterAddresses());

  // The future is stored so CompleteShutdown can wait for initdb to finish.
  initdb_future_ = std::async(std::launch::async, [this, master_addresses_str, term] {
    if (FLAGS_create_initial_sys_catalog_snapshot) {
      initial_snapshot_writer_.emplace();
    }

    Status status = PgWrapper::InitDbForYSQL(
        master_addresses_str, "/tmp", master_->GetSharedMemoryFd());

    if (FLAGS_create_initial_sys_catalog_snapshot && status.ok()) {
      Status write_snapshot_status = initial_snapshot_writer_->WriteSnapshot(
          sys_catalog_->tablet_peer()->tablet(),
          FLAGS_initial_sys_catalog_snapshot_path);
      if (!write_snapshot_status.ok()) {
        status = write_snapshot_status;
      }
    }
    // Persist the initdb result; prefer reporting the original failure over a
    // failure to record it.
    Status finish_status = InitDbFinished(status, term);
    if (!finish_status.ok()) {
      if (status.ok()) {
        status = finish_status;
      }
      LOG_WITH_PREFIX(WARNING)
          << "Failed to set initdb as finished in sys catalog: " << finish_status;
    }
    return status;
  });
  return true;
}

// Creates the three built-in YCQL keyspaces (system, system_schema,
// system_auth) if they do not already exist.
Status CatalogManager::PrepareDefaultNamespaces(int64_t term) {
  RETURN_NOT_OK(PrepareNamespace(
      YQL_DATABASE_CQL, kSystemNamespaceName, kSystemNamespaceId, term));
  RETURN_NOT_OK(PrepareNamespace(
      YQL_DATABASE_CQL, kSystemSchemaNamespaceName, kSystemSchemaNamespaceId, term));
  RETURN_NOT_OK(PrepareNamespace(
      YQL_DATABASE_CQL, kSystemAuthNamespaceName, kSystemAuthNamespaceId, term));
  return Status::OK();
}

// Creates (if missing) the sys catalog table plus every virtual system table
// exposed to YCQL clients (system.*, system_schema.*, system_auth.*).
Status CatalogManager::PrepareSystemTables(int64_t term) {
  // Prepare sys catalog table.
  RETURN_NOT_OK(PrepareSysCatalogTable(term));

  // Create the required system tables here.
  // Each call instantiates one virtual table implementation; the extra
  // parentheses keep the template's comma out of the macro argument list.
  RETURN_NOT_OK((PrepareSystemTableTemplate<PeersVTable>(
      kSystemPeersTableName, kSystemNamespaceName, kSystemNamespaceId, term)));
  RETURN_NOT_OK((PrepareSystemTableTemplate<LocalVTable>(
      kSystemLocalTableName, kSystemNamespaceName, kSystemNamespaceId, term)));
  RETURN_NOT_OK((PrepareSystemTableTemplate<YQLKeyspacesVTable>(
      kSystemSchemaKeyspacesTableName, kSystemSchemaNamespaceName, kSystemSchemaNamespaceId,
      term)));
  RETURN_NOT_OK((PrepareSystemTableTemplate<YQLTablesVTable>(
      kSystemSchemaTablesTableName, kSystemSchemaNamespaceName, kSystemSchemaNamespaceId, term)));
  RETURN_NOT_OK((PrepareSystemTableTemplate<YQLColumnsVTable>(
      kSystemSchemaColumnsTableName, kSystemSchemaNamespaceName, kSystemSchemaNamespaceId, term)));
  RETURN_NOT_OK((PrepareSystemTableTemplate<YQLSizeEstimatesVTable>(
      kSystemSizeEstimatesTableName, kSystemNamespaceName, kSystemNamespaceId, term)));

  // Empty tables.
  RETURN_NOT_OK((PrepareSystemTableTemplate<YQLAggregatesVTable>(
      kSystemSchemaAggregatesTableName, kSystemSchemaNamespaceName, kSystemSchemaNamespaceId,
      term)));
  RETURN_NOT_OK((PrepareSystemTableTemplate<YQLFunctionsVTable>(
      kSystemSchemaFunctionsTableName, kSystemSchemaNamespaceName, kSystemSchemaNamespaceId,
      term)));
  RETURN_NOT_OK((PrepareSystemTableTemplate<YQLIndexesVTable>(
      kSystemSchemaIndexesTableName, kSystemSchemaNamespaceName, kSystemSchemaNamespaceId, term)));
  RETURN_NOT_OK((PrepareSystemTableTemplate<YQLTriggersVTable>(
      kSystemSchemaTriggersTableName, kSystemSchemaNamespaceName, kSystemSchemaNamespaceId,
      term)));
  RETURN_NOT_OK((PrepareSystemTableTemplate<YQLViewsVTable>(
      kSystemSchemaViewsTableName, kSystemSchemaNamespaceName, kSystemSchemaNamespaceId, term)));
  RETURN_NOT_OK((PrepareSystemTableTemplate<QLTypesVTable>(
      kSystemSchemaTypesTableName, kSystemSchemaNamespaceName, kSystemSchemaNamespaceId, term)));
  RETURN_NOT_OK((PrepareSystemTableTemplate<YQLPartitionsVTable>(
      kSystemPartitionsTableName, kSystemNamespaceName, kSystemNamespaceId, term)));

  // System auth tables.
  RETURN_NOT_OK((PrepareSystemTableTemplate<YQLAuthRolesVTable>(
      kSystemAuthRolesTableName, kSystemAuthNamespaceName, kSystemAuthNamespaceId, term)));
  RETURN_NOT_OK((PrepareSystemTableTemplate<YQLAuthRolePermissionsVTable>(
      kSystemAuthRolePermissionsTableName, kSystemAuthNamespaceName, kSystemAuthNamespaceId,
      term)));
  RETURN_NOT_OK((PrepareSystemTableTemplate<YQLAuthResourceRolePermissionsIndexVTable>(
      kSystemAuthResourceRolePermissionsIndexTableName, kSystemAuthNamespaceName,
      kSystemAuthNamespaceId, term)));

  // Ensure kNumSystemTables is in-sync with the system tables created.
  LOG_IF(DFATAL, system_tablets_.size() != kNumSystemTables)
      << "kNumSystemTables is " << kNumSystemTables << " but " << system_tablets_.size()
      << " tables were created";

  // Cache the system.partitions tablet so we can access it in RebuildYQLSystemPartitions.
  RETURN_NOT_OK(GetYQLPartitionsVTable(&system_partitions_tablet_));

  return Status::OK();
}

// Creates the in-memory TableInfo entry for the sys catalog table itself if
// it is not already present, and persists it for the given term.
Status CatalogManager::PrepareSysCatalogTable(int64_t term) {
  // Prepare sys catalog table info.
  auto sys_catalog_table_iter = table_ids_map_->find(kSysCatalogTableId);
  if (sys_catalog_table_iter == table_ids_map_->end()) {
    scoped_refptr<TableInfo> table = NewTableInfo(kSysCatalogTableId);
    table->mutable_metadata()->StartMutation();
    SysTablesEntryPB& metadata = table->mutable_metadata()->mutable_dirty()->pb;
    metadata.set_state(SysTablesEntryPB::RUNNING);
    metadata.set_namespace_id(kSystemSchemaNamespaceId);
    metadata.set_name(kSysCatalogTableName);
    metadata.set_table_type(TableType::YQL_TABLE_TYPE);
    SchemaToPB(sys_catalog_->schema_, metadata.mutable_schema());
    metadata.set_version(0);

    // Register the table in both id- and name-keyed maps before persisting.
    auto table_ids_map_checkout = table_ids_map_.CheckOut();
    sys_catalog_table_iter = table_ids_map_checkout->emplace(table->id(), table).first;
    table_names_map_[{kSystemSchemaNamespaceId, kSysCatalogTableName}] = table;
    table->set_is_system();

    RETURN_NOT_OK(sys_catalog_->Upsert(term, table));
    table->mutable_metadata()->CommitMutation();
  }

  // Prepare sys catalog tablet info.
  if (tablet_map_->count(kSysCatalogTabletId) == 0) {
    scoped_refptr<TableInfo> table = sys_catalog_table_iter->second;
    scoped_refptr<TabletInfo> tablet(new TabletInfo(table, kSysCatalogTabletId));
    tablet->mutable_metadata()->StartMutation();
    SysTabletsEntryPB& metadata = tablet->mutable_metadata()->mutable_dirty()->pb;
    metadata.set_state(SysTabletsEntryPB::RUNNING);

    // The sys catalog is a single-tablet table; derive its one partition from
    // the table's partition schema.
    auto l = table->LockForRead();
    PartitionSchema partition_schema;
    RETURN_NOT_OK(PartitionSchema::FromPB(
        l->pb.partition_schema(), sys_catalog_->schema_, &partition_schema));
    vector<Partition> partitions;
    RETURN_NOT_OK(partition_schema.CreatePartitions(1, &partitions));
    partitions[0].ToPB(metadata.mutable_partition());
    metadata.set_table_id(table->id());
    metadata.add_table_ids(table->id());

    table->set_is_system();
    table->AddTablet(tablet.get());

    auto tablet_map_checkout = tablet_map_.CheckOut();
    (*tablet_map_checkout)[tablet->tablet_id()] = tablet;

    RETURN_NOT_OK(sys_catalog_->Upsert(term, tablet));
    tablet->mutable_metadata()->CommitMutation();
  }

  system_tablets_[kSysCatalogTabletId] = sys_catalog_->tablet_peer_->shared_tablet();

  return Status::OK();
}

// Helper that instantiates the virtual table implementation T and forwards to
// PrepareSystemTable with T's schema. Ownership of the raw pointer is taken
// by PrepareSystemTable (wrapped in a unique_ptr there).
template <class T>
Status CatalogManager::PrepareSystemTableTemplate(const TableName& table_name,
                                                  const NamespaceName& namespace_name,
                                                  const NamespaceId& namespace_id,
                                                  int64_t term) {
  YQLVirtualTable* vtable = new T(table_name, namespace_name, master_);
  return PrepareSystemTable(
      table_name, namespace_name, namespace_id, vtable->schema(), term, vtable);
}

// Creates (or repairs) one virtual system table: creates the table and/or its
// single tablet as needed, updates a stale persisted schema, and registers
// the SystemTablet serving it. Idempotent across restarts.
Status CatalogManager::PrepareSystemTable(const TableName& table_name,
                                          const NamespaceName& namespace_name,
                                          const NamespaceId& namespace_id,
                                          const Schema& schema,
                                          int64_t term,
                                          YQLVirtualTable* vtable) {
  // Take ownership of the vtable passed in by PrepareSystemTableTemplate.
  std::unique_ptr<YQLVirtualTable> yql_storage(vtable);

  scoped_refptr<TableInfo> table = FindPtrOrNull(
      table_names_map_, std::make_pair(namespace_id, table_name));
  bool create_table = true;
  if (table != nullptr) {
    LOG_WITH_PREFIX(INFO) << "Table " << namespace_name << "."
                          << table_name << " already created";

    // Mark the table as a system table.
    table->set_is_system();

    // If the code's schema for this virtual table changed since it was
    // persisted, bump the version and rewrite the schema in the sys catalog.
    Schema persisted_schema;
    RETURN_NOT_OK(table->GetSchema(&persisted_schema));
    if (!persisted_schema.Equals(schema)) {
      LOG_WITH_PREFIX(INFO)
          << "Updating schema of " << namespace_name << "." << table_name << " ...";
      auto l = table->LockForWrite();
      SchemaToPB(schema, l.mutable_data()->pb.mutable_schema());
      l.mutable_data()->pb.set_version(l->pb.version() + 1);
      l.mutable_data()->pb.set_updates_only_index_permissions(false);

      // Update sys-catalog with the new table schema.
      RETURN_NOT_OK(sys_catalog_->Upsert(term, table));
      l.Commit();
    }

    // There might have been a failure after writing the table but before writing the tablets. As
    // a result, if we don't find any tablets, we try to create the tablets only again.
    vector<scoped_refptr<TabletInfo>> tablets;
    table->GetAllTablets(&tablets);
    if (!tablets.empty()) {
      // Initialize the appropriate system tablet.
      DCHECK_EQ(1, tablets.size());
      auto tablet = tablets[0];
      system_tablets_[tablet->tablet_id()] =
          std::make_shared<SystemTablet>(schema, std::move(yql_storage), tablet->tablet_id());
      return Status::OK();
    } else {
      // Table is already created, only need to create tablets now.
      LOG_WITH_PREFIX(INFO)
          << "Creating tablets for " << namespace_name << "." << table_name << " ...";
      create_table = false;
    }
  }

  vector<TabletInfo*> tablets;

  // Create partitions.
  vector<Partition> partitions;
  PartitionSchemaPB partition_schema_pb;
  partition_schema_pb.set_hash_schema(PartitionSchemaPB::MULTI_COLUMN_HASH_SCHEMA);
  PartitionSchema partition_schema;
  RETURN_NOT_OK(PartitionSchema::FromPB(partition_schema_pb, schema, &partition_schema));
  RETURN_NOT_OK(partition_schema.CreatePartitions(1, &partitions));

  if (create_table) {
    // Fill in details for the system table.
    CreateTableRequestPB req;
    req.set_name(table_name);
    req.set_table_type(TableType::YQL_TABLE_TYPE);

    RETURN_NOT_OK(CreateTableInMemory(
        req, schema, partition_schema, true /* create_tablets */, namespace_id, namespace_name,
        partitions, nullptr, &tablets, nullptr, &table));
    // Mark the table as a system table.
    LOG_WITH_PREFIX(INFO)
        << "Inserted new " << namespace_name << "." << table_name
        << " table info into CatalogManager maps";
    // Update the on-disk table state to "running".
    table->mutable_metadata()->mutable_dirty()->pb.set_state(SysTablesEntryPB::RUNNING);
    RETURN_NOT_OK(sys_catalog_->Upsert(term, table));
    LOG_WITH_PREFIX(INFO)
        << "Wrote table to system catalog: " << ToString(table) << ", tablets: "
        << ToString(tablets);
  } else {
    // Still need to create the tablets.
    RETURN_NOT_OK(CreateTabletsFromTable(partitions, table, &tablets));
  }

  DCHECK_EQ(1, tablets.size());
  // We use LOG_ASSERT here since this is expected to crash in some unit tests.
  LOG_ASSERT(!FLAGS_TEST_catalog_manager_simulate_system_table_create_failure);

  // Write Tablets to sys-tablets (in "running" state since we don't want the loadbalancer to
  // assign these tablets since this table is virtual).
  for (TabletInfo* tablet : tablets) {
    tablet->mutable_metadata()->mutable_dirty()->pb.set_state(SysTabletsEntryPB::RUNNING);
  }
  RETURN_NOT_OK(sys_catalog_->Upsert(term, tablets));
  LOG_WITH_PREFIX(INFO) << "Wrote tablets to system catalog: " << ToString(tablets);

  // Commit the in-memory state.
  if (create_table) {
    table->mutable_metadata()->CommitMutation();
  }
  for (TabletInfo* tablet : tablets) {
    tablet->mutable_metadata()->CommitMutation();
  }
  // Mark the table as a system table.
  table->set_is_system();

  // Finally create the appropriate tablet object.
  auto tablet = tablets[0];
  system_tablets_[tablet->tablet_id()] =
      std::make_shared<SystemTablet>(schema, std::move(yql_storage), tablet->tablet_id());
  return Status::OK();
}

// True iff the namespace is a YCQL (CQL-compatible) keyspace.
bool CatalogManager::IsYcqlNamespace(const NamespaceInfo& ns) {
  return ns.database_type() == YQLDatabase::YQL_DATABASE_CQL;
}

// True iff the table is a user-visible YCQL table (excludes the sys catalog
// table, which also uses the YQL table type).
bool CatalogManager::IsYcqlTable(const TableInfo& table) {
  return table.GetTableType() == TableType::YQL_TABLE_TYPE && table.id() != kSysCatalogTableId;
}

// Creates one built-in keyspace with a fixed id if it does not already exist,
// persisting it to the sys catalog for the given term. Idempotent.
Status CatalogManager::PrepareNamespace(
    YQLDatabase db_type, const NamespaceName& name, const NamespaceId& id, int64_t term) {
  scoped_refptr<NamespaceInfo> ns = FindPtrOrNull(namespace_ids_map_, id);
  if (ns != nullptr) {
    LOG_WITH_PREFIX(INFO)
        << "Keyspace " << ns->ToString() << " already created, skipping initialization";
    return Status::OK();
  }

  // Create entry.
  SysNamespaceEntryPB ns_entry;
  ns_entry.set_name(name);
  ns_entry.set_database_type(db_type);
  ns_entry.set_state(SysNamespaceEntryPB::RUNNING);

  // Create in memory object.
  ns = new NamespaceInfo(id);

  // Prepare write.
  auto l = ns->LockForWrite();
  l.mutable_data()->pb = std::move(ns_entry);

  namespace_ids_map_[id] = ns;
  namespace_names_mapper_[db_type][l.mutable_data()->pb.name()] = ns;

  // Write to sys_catalog and in memory.
  RETURN_NOT_OK(sys_catalog_->Upsert(term, ns));
  l.Commit();

  LOG_WITH_PREFIX(INFO) << "Created default keyspace: " << ns->ToString();
  return Status::OK();
}

// Verifies that at least one of this process's local addresses appears in the
// configured master_addresses list. Returns OK (best-effort) when local
// addresses cannot be enumerated; IllegalState when none match.
Status CatalogManager::CheckLocalHostInMasterAddresses() {
  auto local_hostport = master_->first_rpc_address();
  std::vector<IpAddress> local_addrs;

  if (local_hostport.address().is_unspecified()) {
    // Bound to a wildcard address: enumerate all local interfaces instead.
    auto status = GetLocalAddresses(&local_addrs, AddressFilter::ANY);
    if (!status.ok() || local_addrs.empty()) {
      LOG(WARNING) << "Could not enumerate network interfaces due to " << status
                   << ", found " << local_addrs.size() << " local addresses.";
      return Status::OK();
    }
  } else {
    local_addrs.push_back(local_hostport.address());
  }

  std::vector<Endpoint> resolved_addresses;
  Status s = server::ResolveMasterAddresses(
      master_->opts().GetMasterAddresses(), &resolved_addresses);
  RETURN_NOT_OK(s);

  for (auto const& addr : resolved_addresses) {
    if (addr.address().is_unspecified() ||
        std::find(local_addrs.begin(), local_addrs.end(), addr.address()) !=
            local_addrs.end()) {
      return Status::OK();
    }
  }
  return STATUS_SUBSTITUTE(IllegalState,
      "None of the local addresses are present in master_addresses $0.",
      master_->opts().master_addresses_flag);
}

// Loads the sys catalog tablet metadata from disk, or bootstraps it: creates
// a brand-new sys catalog when none exists and master addresses are known,
// or switches to shell mode when they are not.
Status CatalogManager::InitSysCatalogAsync() {
  LockGuard lock(mutex_);

  // Optimistically try to load data from disk.
  Status s = sys_catalog_->Load(master_->fs_manager());

  if (!s.ok() && s.IsNotFound()) {
    // We have yet to initialize the syscatalog metadata, need to create the metadata file.
    LOG(INFO) << "Did not find previous SysCatalogTable data on disk. " << s;

    if (!master_->opts().AreMasterAddressesProvided()) {
      // No peers configured: run as a shell-mode master awaiting configuration.
      master_->SetShellMode(true);
      LOG(INFO) << "Starting master in shell mode.";
      return Status::OK();
    }

    RETURN_NOT_OK(CheckLocalHostInMasterAddresses());
    RETURN_NOT_OK_PREPEND(sys_catalog_->CreateNew(master_->fs_manager()),
        Substitute("Encountered errors during system catalog initialization:"
                   "\n\tError on Load: $0\n\tError on CreateNew: ", s.ToString()));

    return Status::OK();
  }

  return s;
}

// True once Init has completed and until shutdown begins.
bool CatalogManager::IsInitialized() const {
  std::lock_guard<simple_spinlock> l(state_lock_);
  return state_ == kRunning;
}

// TODO - delete this API after HandleReportedTablet() usage is removed.
// Returns OK only when this master is the committed Raft leader AND has
// finished loading the sys catalog for the current term.
Status CatalogManager::CheckIsLeaderAndReady() const {
  std::lock_guard<simple_spinlock> l(state_lock_);
  if (PREDICT_FALSE(state_ != kRunning)) {
    return STATUS_SUBSTITUTE(ServiceUnavailable,
        "Catalog manager is shutting down. State: $0", state_);
  }
  string uuid = master_->fs_manager()->uuid();
  if (master_->opts().IsShellMode()) {
    // Consensus and other internal fields should not be checked when is shell mode.
    return STATUS_SUBSTITUTE(IllegalState,
        "Catalog manager of $0 is in shell mode, not the leader", uuid);
  }
  Consensus* consensus = tablet_peer()->consensus();
  if (consensus == nullptr) {
    return STATUS(IllegalState, "Consensus has not been initialized yet");
  }
  ConsensusStatePB cstate = consensus->ConsensusState(CONSENSUS_CONFIG_COMMITTED);
  if (PREDICT_FALSE(!cstate.has_leader_uuid() || cstate.leader_uuid() != uuid)) {
    return STATUS_SUBSTITUTE(IllegalState,
        "Not the leader. Local UUID: $0, Consensus state: $1", uuid, cstate.ShortDebugString());
  }
  // Leader for the term, but LoadSysCatalogDataTask has not caught up yet.
  if (PREDICT_FALSE(leader_ready_term_ != cstate.current_term())) {
    return STATUS_SUBSTITUTE(ServiceUnavailable,
        "Leader not yet ready to serve requests: ready term $0 vs cstate term $1",
        leader_ready_term_, cstate.current_term());
  }
  return Status::OK();
}

// Accessor for the sys catalog's tablet peer.
const std::shared_ptr<tablet::TabletPeer> CatalogManager::tablet_peer() const {
  return sys_catalog_->tablet_peer();
}

// This master's current Raft role; NON_PARTICIPANT before initialization or
// in shell mode.
RaftPeerPB::Role CatalogManager::Role() const {
  if (!IsInitialized() || master_->opts().IsShellMode()) {
    return RaftPeerPB::NON_PARTICIPANT;
  }

  return tablet_peer()->consensus()->role();
}

// Begins shutdown: flips state to kClosing and starts shutting down
// background tasks. Returns false if shutdown had already started.
bool CatalogManager::StartShutdown() {
  {
    std::lock_guard<simple_spinlock> l(state_lock_);
    if (state_ == kClosing) {
      VLOG(2) << "CatalogManager already shut down";
      return false;
    }
    state_ = kClosing;
  }

  refresh_yql_partitions_task_.StartShutdown();
  refresh_ysql_tablespace_info_task_.StartShutdown();

  if (sys_catalog_) {
    sys_catalog_->StartShutdown();
  }

  tablet_split_manager_.Shutdown();

  return true;
}

// Finishes shutdown: stops all pools/tasks, aborts outstanding table tasks,
// shuts down storage, and waits for a still-running initdb to complete.
void CatalogManager::CompleteShutdown() {
  // Shutdown the Catalog Manager background thread (load balancing).
  refresh_yql_partitions_task_.CompleteShutdown();
  refresh_ysql_tablespace_info_task_.CompleteShutdown();

  if (background_tasks_) {
    background_tasks_->Shutdown();
  }
  if (background_tasks_thread_pool_) {
    background_tasks_thread_pool_->Shutdown();
  }
  if (leader_initialization_pool_) {
    leader_initialization_pool_->Shutdown();
  }
  if (async_task_pool_) {
    async_task_pool_->Shutdown();
  }

  // Mark all outstanding table tasks as aborted and wait for them to fail.
  //
  // There may be an outstanding table visitor thread modifying the table map,
  // so we must make a copy of it before we iterate. It's OK if the visitor
  // adds more entries to the map even after we finish; it won't start any new
  // tasks for those entries.
  vector<scoped_refptr<TableInfo>> copy;
  {
    SharedLock lock(mutex_);
    AppendValuesFromMap(*table_ids_map_, &copy);
  }
  AbortAndWaitForAllTasks(copy);

  // Shut down the underlying storage for tables and tablets.
  if (sys_catalog_) {
    sys_catalog_->CompleteShutdown();
  }

  // Reset the jobs/tasks tracker.
  tasks_tracker_->Reset();
  jobs_tracker_->Reset();

  // An abandoned async initdb would touch freed state; block until it is done.
  if (initdb_future_ && initdb_future_->wait_for(0s) != std::future_status::ready) {
    LOG(WARNING) << "initdb is still running, waiting for it to complete.";
    initdb_future_->wait();
    LOG(INFO) << "Finished running initdb, proceeding with catalog manager shutdown.";
  }
}

// Rolls back a failed table creation: aborts the table's tasks, releases the
// pending mutations, and erases the table and its tablets from the in-memory
// maps. Returns an error derived from `s` (adjusted if leadership was lost).
Status CatalogManager::AbortTableCreation(TableInfo* table,
                                          const vector<TabletInfo*>& tablets,
                                          const Status& s,
                                          CreateTableResponsePB* resp) {
  LOG(WARNING) << s;

  // Snapshot identifying info from the dirty (uncommitted) metadata before
  // the mutations are aborted below.
  const TableId table_id = table->id();
  const TableName table_name = table->mutable_metadata()->mutable_dirty()->pb.name();
  const NamespaceId table_namespace_id =
      table->mutable_metadata()->mutable_dirty()->pb.namespace_id();
  vector<string> tablet_ids_to_erase;
  for (TabletInfo* tablet : tablets) {
    tablet_ids_to_erase.push_back(tablet->tablet_id());
  }

  LOG(INFO) << "Aborting creation of table '" << table_name << "', erasing table and tablets ("
            << JoinStrings(tablet_ids_to_erase, ",") << ") from in-memory state.";

  // Since this is a failed creation attempt, it's safe to just abort
  // all tasks, as (by definition) no tasks may be pending against a
  // table that has failed to successfully create.
  table->AbortTasksAndClose();
  table->WaitTasksCompletion();

  LockGuard lock(mutex_);

  // Call AbortMutation() manually, as otherwise the lock won't be released.
  for (TabletInfo* tablet : tablets) {
    tablet->mutable_metadata()->AbortMutation();
  }
  table->mutable_metadata()->AbortMutation();
  auto tablet_map_checkout = tablet_map_.CheckOut();
  for (const TabletId& tablet_id_to_erase : tablet_ids_to_erase) {
    CHECK_EQ(tablet_map_checkout->erase(tablet_id_to_erase), 1)
        << "Unable to erase tablet " << tablet_id_to_erase << " from tablet map.";
  }

  auto table_ids_map_checkout = table_ids_map_.CheckOut();
  table_names_map_.erase({table_namespace_id, table_name});  // Not present if PGSQL table.
  CHECK_EQ(table_ids_map_checkout->erase(table_id), 1)
      << "Unable to erase table with id " << table_id << " from table ids map.";

  return CheckIfNoLongerLeaderAndSetupError(s, resp);
}

// Resolves the effective replication info for a table, in priority order:
// table-level info (validated), then the table's tablespace (YSQL), then the
// cluster-level default.
Result<ReplicationInfoPB> CatalogManager::GetTableReplicationInfo(
    const ReplicationInfoPB& table_replication_info, const TablespaceId& tablespace_id) {
  if (IsReplicationInfoSet(table_replication_info)) {
    // The table has custom replication info set for it, return it if valid.
    RETURN_NOT_OK(ValidateTableReplicationInfo(table_replication_info));
    return table_replication_info;
  }
  // Table level replication info not set. Check whether the table is
  // associated with a tablespace and if so, return the tablespace
  // replication info.
  if (GetAtomicFlag(&FLAGS_enable_ysql_tablespaces_for_placement)) {
    boost::optional<ReplicationInfoPB> tablespace_pb =
        VERIFY_RESULT(GetTablespaceReplicationInfoWithRetry(tablespace_id));
    if (tablespace_pb) {
      // Return the tablespace placement.
      return tablespace_pb.value();
    }
  }

  // Neither table nor tablespace info set. Return cluster level replication info.
auto l = cluster_config_->LockForRead(); return l->pb.replication_info(); } std::shared_ptr<YsqlTablespaceManager> CatalogManager::GetTablespaceManager() { SharedLock lock(tablespace_mutex_); return tablespace_manager_; } Result<boost::optional<TablespaceId>> CatalogManager::GetTablespaceForTable( const scoped_refptr<TableInfo>& table) { auto tablespace_manager = GetTablespaceManager(); return tablespace_manager->GetTablespaceForTable(table); } Result<boost::optional<ReplicationInfoPB>> CatalogManager::GetTablespaceReplicationInfoWithRetry( const TablespaceId& tablespace_id) { auto tablespace_manager = GetTablespaceManager(); auto replication_info_result = tablespace_manager->GetTablespaceReplicationInfo(tablespace_id); if (replication_info_result) { return replication_info_result; } // We failed to find the tablespace placement policy. Refresh the tablespace info and try again. auto tablespace_map = VERIFY_RESULT(GetYsqlTablespaceInfo()); // We clone the tablespace_manager and update the clone with the new tablespace_map that we // fetched above. We do this instead of updating the tablespace_manager object in-place because // other clients may have a shared_ptr to it through 'GetTablespaceManager()'. 
tablespace_manager = tablespace_manager->CreateCloneWithTablespaceMap(tablespace_map); { LockGuard lock(tablespace_mutex_); tablespace_manager_ = tablespace_manager; } return tablespace_manager->GetTablespaceReplicationInfo(tablespace_id); } bool CatalogManager::IsReplicationInfoSet(const ReplicationInfoPB& replication_info) { const auto& live_placement_info = replication_info.live_replicas(); if (!(live_placement_info.placement_blocks().empty() && live_placement_info.num_replicas() <= 0 && live_placement_info.placement_uuid().empty()) || !replication_info.read_replicas().empty() || !replication_info.affinitized_leaders().empty()) { return true; } return false; } Status CatalogManager::ValidateTableReplicationInfo(const ReplicationInfoPB& replication_info) { if (!IsReplicationInfoSet(replication_info)) { return STATUS(InvalidArgument, "No replication info set."); } // We don't support setting any other fields other than live replica placements for now. if (!replication_info.read_replicas().empty() || !replication_info.affinitized_leaders().empty()) { return STATUS(InvalidArgument, "Only live placement info can be set for table " "level replication info."); } // Today we support setting table level replication info only in clusters where read replica // placements is not set. Return error if the cluster has read replica placements set. auto l = cluster_config_->LockForRead(); const ReplicationInfoPB& cluster_replication_info = l->pb.replication_info(); // TODO(bogdan): figure this out when we expand on geopartition support. // if (!cluster_replication_info.read_replicas().empty() || // !cluster_replication_info.affinitized_leaders().empty()) { // return STATUS(InvalidArgument, "Setting table level replication info is not supported " // "for clusters with read replica placements"); // } // If the replication info has placement_uuid set, verify that it matches the cluster // placement_uuid. 
if (replication_info.live_replicas().placement_uuid().empty()) { return Status::OK(); } if (replication_info.live_replicas().placement_uuid() != cluster_replication_info.live_replicas().placement_uuid()) { return STATUS(InvalidArgument, "Placement uuid for table level replication info " "must match that of the cluster's live placement info."); } return Status::OK(); } Result<shared_ptr<TablespaceIdToReplicationInfoMap>> CatalogManager::GetYsqlTablespaceInfo() { auto table_info = GetTableInfo(kPgTablespaceTableId); if (table_info == nullptr) { return STATUS(InternalError, "pg_tablespace table info not found"); } auto tablespace_map = VERIFY_RESULT(sys_catalog_->ReadPgTablespaceInfo()); // The tablespace options do not usually contain the placement uuid. // Populate the current cluster placement uuid into the placement information for // each tablespace. string placement_uuid; { auto l = cluster_config_->LockForRead(); // TODO(deepthi.srinivasan): Read-replica placements are not supported as // of now. placement_uuid = l->pb.replication_info().live_replicas().placement_uuid(); } if (!placement_uuid.empty()) { for (auto& iter : *tablespace_map) { if (iter.second) { iter.second.value().mutable_live_replicas()->set_placement_uuid(placement_uuid); } } } // Before updating the tablespace placement map, validate the // placement policies. for (auto& iter : *tablespace_map) { if (iter.second) { RETURN_NOT_OK(ValidateTableReplicationInfo(iter.second.value())); } } return tablespace_map; } Result<shared_ptr<TableToTablespaceIdMap>> CatalogManager::GetYsqlTableToTablespaceMap() { // First fetch all namespaces. This is because the table_to_tablespace information is only // found in the pg_class catalog table. There exists a separate pg_class table in each // namespace. To build in-memory state for all tables, process pg_class table for each // namespace. 
vector<NamespaceId> namespace_id_vec; { SharedLock lock(mutex_); for (const auto& ns : namespace_ids_map_) { if (ns.second->database_type() != YQL_DATABASE_PGSQL) { continue; } if (ns.second->colocated()) { // Skip processing tables in colocated databases. continue; } if (ns.first == kPgSequencesDataNamespaceId) { // Skip the database created for sequences system table. continue; } // TODO (Deepthi): Investigate if safe to skip template0 and template1 as well. namespace_id_vec.emplace_back(ns.first); } } // For each namespace, fetch the table->tablespace information by reading pg_class // table for each namespace. auto table_to_tablespace_map = std::make_shared<TableToTablespaceIdMap>(); for (const NamespaceId& nsid : namespace_id_vec) { VLOG(5) << "Refreshing placement information for namespace " << nsid; const uint32_t database_oid = CHECK_RESULT(GetPgsqlDatabaseOid(nsid)); Status s = sys_catalog_->ReadPgClassInfo(database_oid, table_to_tablespace_map.get()); if (!s.ok()) { LOG(WARNING) << "Refreshing table->tablespace info failed for namespace " << nsid << " with error: " << s.ToString(); continue; } VLOG(5) << "Successfully refreshed placement information for namespace " << nsid; } return table_to_tablespace_map; } void CatalogManager::StartTablespaceBgTaskIfStopped() { if (GetAtomicFlag(&FLAGS_ysql_tablespace_info_refresh_secs) <= 0 || !GetAtomicFlag(&FLAGS_enable_ysql_tablespaces_for_placement)) { // The tablespace bg task is disabled. Nothing to do. return; } const bool is_task_running = tablespace_bg_task_running_.exchange(true); if (is_task_running) { // Task already running, nothing to do. return; } ScheduleRefreshTablespaceInfoTask(true /* schedule_now */); } void CatalogManager::ScheduleRefreshTablespaceInfoTask(const bool schedule_now) { int wait_time = 0; if (!schedule_now) { wait_time = GetAtomicFlag(&FLAGS_ysql_tablespace_info_refresh_secs); if (wait_time <= 0) { // The tablespace refresh task has been disabled. 
tablespace_bg_task_running_ = false; return; } } refresh_ysql_tablespace_info_task_.Schedule([this](const Status& status) { Status s = background_tasks_thread_pool_->SubmitFunc( std::bind(&CatalogManager::RefreshTablespaceInfoPeriodically, this)); if (!s.IsOk()) { // Failed to submit task to the thread pool. Mark that the task is now // no longer running. LOG(WARNING) << "Failed to schedule: RefreshTablespaceInfoPeriodically"; tablespace_bg_task_running_ = false; } }, wait_time * 1s); } void CatalogManager::RefreshTablespaceInfoPeriodically() { if (!GetAtomicFlag(&FLAGS_enable_ysql_tablespaces_for_placement)) { tablespace_bg_task_running_ = false; return; } if (!CheckIsLeaderAndReady().IsOk()) { LOG(INFO) << "No longer the leader, so cancelling tablespace info task"; tablespace_bg_task_running_ = false; return; } // Refresh the tablespace info in memory. Status s = DoRefreshTablespaceInfo(); if (!s.IsOk()) { LOG(WARNING) << "Tablespace refresh task failed with error " << s.ToString(); } // Schedule the next iteration of the task. ScheduleRefreshTablespaceInfoTask(); } Status CatalogManager::DoRefreshTablespaceInfo() { VLOG(2) << "Running RefreshTablespaceInfoPeriodically task"; // First refresh the tablespace info in memory. auto tablespace_info = VERIFY_RESULT(GetYsqlTablespaceInfo()); shared_ptr<TableToTablespaceIdMap> table_to_tablespace_map = nullptr; if (tablespace_info->size() > kYsqlNumDefaultTablespaces) { // There exist custom tablespaces in the system. Fetch the table->tablespace // map from PG catalog tables. table_to_tablespace_map = VERIFY_RESULT(GetYsqlTableToTablespaceMap()); } // Update tablespace_manager_. 
{ LockGuard lock(tablespace_mutex_); tablespace_manager_ = std::make_shared<YsqlTablespaceManager>(tablespace_info, table_to_tablespace_map); } VLOG(3) << "Refreshed tablespace information in memory"; return Status::OK(); } Status CatalogManager::AddIndexInfoToTable(const scoped_refptr<TableInfo>& indexed_table, const IndexInfoPB& index_info, CreateTableResponsePB* resp) { LOG(INFO) << "AddIndexInfoToTable to " << indexed_table->ToString() << " IndexInfo " << yb::ToString(index_info); TRACE("Locking indexed table"); auto l = DCHECK_NOTNULL(indexed_table)->LockForWrite(); RETURN_NOT_OK(CheckIfTableDeletedOrNotVisibleToClient(l, resp)); // Make sure that the index appears to not have been added to the table until the tservers apply // the alter and respond back. // Heed issue #6233. if (!l->pb.has_fully_applied_schema()) { MultiStageAlterTable::CopySchemaDetailsToFullyApplied(&l.mutable_data()->pb); } // Add index info to indexed table and increment schema version. auto& pb = l.mutable_data()->pb; pb.add_indexes()->CopyFrom(index_info); pb.set_version(l.mutable_data()->pb.version() + 1); pb.set_updates_only_index_permissions(false); l.mutable_data()->set_state( SysTablesEntryPB::ALTERING, Format("Add index info version=$0 ts=$1", pb.version(), LocalTimeAsString())); // Update sys-catalog with the new indexed table info. TRACE("Updating indexed table metadata on disk"); RETURN_NOT_OK(sys_catalog_->Upsert(leader_ready_term(), indexed_table)); // Update the in-memory state. 
TRACE("Committing in-memory state"); l.Commit(); RETURN_NOT_OK(SendAlterTableRequest(indexed_table)); return Status::OK(); } Status CatalogManager::CreateCopartitionedTable(const CreateTableRequestPB& req, CreateTableResponsePB* resp, rpc::RpcContext* rpc, Schema schema, scoped_refptr<NamespaceInfo> ns) { scoped_refptr<TableInfo> parent_table_info; Status s; PartitionSchema partition_schema; std::vector<Partition> partitions; const NamespaceId& namespace_id = ns->id(); const NamespaceName& namespace_name = ns->name(); LockGuard lock(mutex_); TRACE("Acquired catalog manager lock"); parent_table_info = FindPtrOrNull(*table_ids_map_, schema.table_properties().CopartitionTableId()); if (parent_table_info == nullptr) { s = STATUS(NotFound, "The object does not exist: copartitioned table with id", schema.table_properties().CopartitionTableId()); return SetupError(resp->mutable_error(), MasterErrorPB::OBJECT_NOT_FOUND, s); } scoped_refptr<TableInfo> this_table_info; std::vector<TabletInfo *> tablets; TabletInfos scoped_ref_tablets; // Verify that the table does not exist. this_table_info = FindPtrOrNull(table_names_map_, {namespace_id, req.name()}); if (this_table_info != nullptr) { s = STATUS_SUBSTITUTE(AlreadyPresent, "Object '$0.$1' already exists", GetNamespaceNameUnlocked(this_table_info), this_table_info->name()); LOG(WARNING) << "Found table: " << this_table_info->ToStringWithState() << ". Failed creating copartitioned table with error: " << s.ToString() << " Request:\n" << req.DebugString(); return SetupError(resp->mutable_error(), MasterErrorPB::OBJECT_ALREADY_PRESENT, s); } // Don't add copartitioned tables to Namespaces that aren't running. if (ns->state() != SysNamespaceEntryPB::RUNNING) { Status s = STATUS_SUBSTITUTE(TryAgain, "Namespace not running (State=$0). Cannot create $1.$2", ns->state(), ns->name(), req.name() ); return SetupError(resp->mutable_error(), NamespaceMasterError(ns->state()), s); } // TODO: pass index_info for copartitioned index. 
RETURN_NOT_OK(CreateTableInMemory( req, schema, partition_schema, false /* create_tablets */, namespace_id, namespace_name, partitions, nullptr, nullptr, resp, &this_table_info)); TRACE("Inserted new table info into CatalogManager maps"); // NOTE: the table is already locked for write at this point, // since the CreateTableInfo function leave it in that state. // It will get committed at the end of this function. // Sanity check: the table should be in "preparing" state. CHECK_EQ(SysTablesEntryPB::PREPARING, this_table_info->metadata().dirty().pb.state()); parent_table_info->GetAllTablets(&scoped_ref_tablets); for (auto tablet : scoped_ref_tablets) { tablets.push_back(tablet.get()); tablet->mutable_metadata()->StartMutation(); tablet->mutable_metadata()->mutable_dirty()->pb.add_table_ids(this_table_info->id()); } // Update Tablets about new table id to sys-tablets. s = sys_catalog_->Upsert(leader_ready_term(), tablets); if (PREDICT_FALSE(!s.ok())) { return AbortTableCreation(this_table_info.get(), tablets, s.CloneAndPrepend( Substitute("An error occurred while inserting to sys-tablets: $0", s.ToString())), resp); } TRACE("Wrote tablets to system table"); // Update the on-disk table state to "running". this_table_info->AddTablets(tablets); this_table_info->mutable_metadata()->mutable_dirty()->pb.set_state(SysTablesEntryPB::RUNNING); s = sys_catalog_->Upsert(leader_ready_term(), this_table_info);; if (PREDICT_FALSE(!s.ok())) { return AbortTableCreation(this_table_info.get(), tablets, s.CloneAndPrepend( Substitute("An error occurred while inserting to sys-tablets: $0", s.ToString())), resp); } TRACE("Wrote table to system table"); // Commit the in-memory state. 
this_table_info->mutable_metadata()->CommitMutation(); for (TabletInfo *tablet : tablets) { tablet->mutable_metadata()->CommitMutation(); } for (const auto& tablet : scoped_ref_tablets) { SendCopartitionTabletRequest(tablet, this_table_info); } LOG(INFO) << "Successfully created table " << this_table_info->ToString() << " per request from " << RequestorString(rpc); return Status::OK(); } namespace { Result<std::array<PartitionPB, kNumSplitParts>> CreateNewTabletsPartition( const TabletInfo& tablet_info, const std::string& split_partition_key) { const auto& source_partition = tablet_info.LockForRead()->pb.partition(); if (split_partition_key <= source_partition.partition_key_start() || (!source_partition.partition_key_end().empty() && split_partition_key >= source_partition.partition_key_end())) { return STATUS_FORMAT( InvalidArgument, "Can't split tablet $0 (partition_key_start: $1 partition_key_end: $2) by partition " "boundary (split_key: $3)", tablet_info.tablet_id(), source_partition.partition_key_start(), source_partition.partition_key_end(), split_partition_key); } std::array<PartitionPB, kNumSplitParts> new_tablets_partition; new_tablets_partition.fill(source_partition); new_tablets_partition[0].set_partition_key_end(split_partition_key); new_tablets_partition[1].set_partition_key_start(split_partition_key); static_assert(kNumSplitParts == 2, "We expect tablet to be split into 2 new tablets here"); return new_tablets_partition; } } // namespace CHECKED_STATUS CatalogManager::TEST_SplitTablet( const TabletId& tablet_id, const std::string& split_encoded_key, const std::string& split_partition_key) { auto source_tablet_info = VERIFY_RESULT(GetTabletInfo(tablet_id)); return DoSplitTablet(source_tablet_info, split_encoded_key, split_partition_key); } Status CatalogManager::TEST_SplitTablet( const scoped_refptr<TabletInfo>& source_tablet_info, docdb::DocKeyHash split_hash_code) { return DoSplitTablet(source_tablet_info, split_hash_code); } Status 
CatalogManager::TEST_IncrementTablePartitionListVersion(const TableId& table_id) { auto table_info = GetTableInfo(table_id); SCHECK(table_info != nullptr, NotFound, Format("Table $0 not found", table_id)); LockGuard lock(mutex_); auto table_lock = table_info->LockForWrite(); auto& table_pb = table_lock.mutable_data()->pb; table_pb.set_partition_list_version(table_pb.partition_list_version() + 1); RETURN_NOT_OK(sys_catalog_->Upsert(leader_ready_term(), table_info)); table_lock.Commit(); return Status::OK(); } Status CatalogManager::ValidateSplitCandidate(const TabletInfo& tablet_info) { if (PREDICT_FALSE(FLAGS_TEST_validate_all_tablet_candidates)) { return Status::OK(); } const TableInfo& table = *tablet_info.table().get(); // Check if this tablet is covered by an PITR schedule. if (!FLAGS_enable_tablet_split_of_pitr_tables && VERIFY_RESULT(IsTablePartOfSomeSnapshotSchedule(table))) { VLOG(1) << Substitute("Tablet splitting is not supported for tables that are a part of" " some active PITR schedule, tablet_id: $0", tablet_info.tablet_id()); return STATUS_FORMAT( NotSupported, "Tablet splitting is not supported for tables that are a part of" " some active PITR schedule, tablet_id: $0", tablet_info.tablet_id()); } // Check if this tablet is part of a cdc stream. 
if (PREDICT_TRUE(!FLAGS_enable_tablet_split_of_xcluster_replicated_tables) && IsCdcEnabled(table)) { VLOG(1) << Substitute("Tablet splitting is not supported for tables that are a part of" " a CDC stream, tablet_id: $0", tablet_info.tablet_id()); return STATUS_FORMAT( NotSupported, "Tablet splitting is not supported for tables that are a part of" " a CDC stream, tablet_id: $0", tablet_info.tablet_id()); } if (tablet_info.table()->GetTableType() == TRANSACTION_STATUS_TABLE_TYPE) { return STATUS_FORMAT( NotSupported, "Tablet splitting is not supported for transaction status tables, tablet_id: $0", tablet_info.tablet_id()); } if (tablet_info.colocated()) { return STATUS_FORMAT( NotSupported, "Tablet splitting is not supported for colocated tables, tablet_id: $0", tablet_info.tablet_id()); } { auto tablet_state = tablet_info.LockForRead()->pb.state(); if (tablet_state != SysTabletsEntryPB::RUNNING) { return STATUS_EC_FORMAT(IllegalState, MasterError(MasterErrorPB::TABLET_NOT_RUNNING), "Tablet is not in running state: $0", tablet_state); } } if (tablet_info.table()->GetTableType() == REDIS_TABLE_TYPE) { return STATUS_FORMAT( NotSupported, "Tablet splitting is not supported for YEDIS tables, tablet_id: $0", tablet_info.tablet_id()); } if (FLAGS_tablet_split_limit_per_table != 0 && tablet_info.table()->NumTablets() >= FLAGS_tablet_split_limit_per_table) { // TODO(tsplit): Avoid tablet server of scanning tablets for the tables that already // reached the split limit of tablet #6220 return STATUS_EC_FORMAT(IllegalState, MasterError(MasterErrorPB::REACHED_SPLIT_LIMIT), "Too many tablets for the table, table_id: $0, limit: $1", tablet_info.table()->id(), FLAGS_tablet_split_limit_per_table); } if (tablet_info.table()->IsBackfilling()) { return STATUS_EC_FORMAT(IllegalState, MasterError(MasterErrorPB::SPLIT_OR_BACKFILL_IN_PROGRESS), "Backfill operation in progress, table_id: $0", tablet_info.table()->id()); } return Status::OK(); } bool 
CatalogManager::ShouldSplitValidCandidate( const TabletInfo& tablet_info, const TabletReplicaDriveInfo& drive_info) const { if (PREDICT_FALSE(FLAGS_TEST_select_all_tablets_for_split)) { return true; } if (drive_info.may_have_orphaned_post_split_data) { return false; } int64 size = drive_info.sst_files_size; DCHECK(size >= 0) << "Detected overflow in casting sst_files_size to signed int."; if (size < FLAGS_tablet_split_low_phase_size_threshold_bytes) { return false; } TSDescriptorVector ts_descs; { BlacklistSet blacklist = BlacklistSetFromPB(); master_->ts_manager()->GetAllLiveDescriptors(&ts_descs, blacklist); } auto num_servers = ts_descs.size(); int64 num_tablets_per_server = tablet_info.table()->NumTablets() / num_servers; if (num_tablets_per_server < FLAGS_tablet_split_low_phase_shard_count_per_node) { return size > FLAGS_tablet_split_low_phase_size_threshold_bytes; } if (num_tablets_per_server < FLAGS_tablet_split_high_phase_shard_count_per_node) { return size > FLAGS_tablet_split_high_phase_size_threshold_bytes; } return size > FLAGS_tablet_force_split_threshold_bytes; } Status CatalogManager::DoSplitTablet( const scoped_refptr<TabletInfo>& source_tablet_info, const std::string& split_encoded_key, const std::string& split_partition_key) { auto source_table_lock = source_tablet_info->table()->LockForWrite(); auto source_tablet_lock = source_tablet_info->LockForWrite(); // We must re-validate the split candidate here *after* grabbing locks on the table and tablet to // ensure a backfill does not happen before we modify catalog metadata to include new subtablets. // This process adds new subtablets in the CREATING state, which if encountered by backfill code // will block the backfill process. 
RETURN_NOT_OK(ValidateSplitCandidate(*source_tablet_info)); auto drive_info = VERIFY_RESULT(source_tablet_info->GetLeaderReplicaDriveInfo()); if (!ShouldSplitValidCandidate(*source_tablet_info, drive_info)) { // It is possible that we queued up a split candidate in TabletSplitManager which was, at the // time, a valid split candidate, but by the time the candidate was actually processed here, the // cluster may have changed, putting us in a new split threshold phase, and it may no longer be // a valid candidate. This is not an unexpected error, but we should bail out of splitting this // tablet regardless. return STATUS_FORMAT( InvalidArgument, "Tablet split candidate $0 is no longer a valid split candidate.", source_tablet_info->tablet_id()); } LOG(INFO) << "Starting tablet split: " << source_tablet_info->ToString() << " by partition key: " << Slice(split_partition_key).ToDebugHexString(); std::array<PartitionPB, kNumSplitParts> new_tablets_partition = VERIFY_RESULT( CreateNewTabletsPartition(*source_tablet_info, split_partition_key)); std::array<TabletId, kNumSplitParts> new_tablet_ids; for (int i = 0; i < kNumSplitParts; ++i) { if (i < source_tablet_lock->pb.split_tablet_ids_size()) { // Post-split tablet `i` has been already registered. new_tablet_ids[i] = source_tablet_lock->pb.split_tablet_ids(i); } else { auto* new_tablet_info = VERIFY_RESULT(RegisterNewTabletForSplit( source_tablet_info.get(), new_tablets_partition[i], &source_table_lock)); new_tablet_ids[i] = new_tablet_info->id(); source_tablet_lock.mutable_data()->pb.add_split_tablet_ids(new_tablet_info->id()); } } source_tablet_lock.Commit(); source_table_lock.Commit(); // TODO(tsplit): what if source tablet will be deleted before or during TS leader is processing // split? Add unit-test. 
RETURN_NOT_OK(SendSplitTabletRequest( source_tablet_info, new_tablet_ids, split_encoded_key, split_partition_key)); return Status::OK(); } Status CatalogManager::DoSplitTablet( const scoped_refptr<TabletInfo>& source_tablet_info, docdb::DocKeyHash split_hash_code) { docdb::KeyBytes split_encoded_key; docdb::DocKeyEncoderAfterTableIdStep(&split_encoded_key) .Hash(split_hash_code, std::vector<docdb::PrimitiveValue>()); const auto split_partition_key = PartitionSchema::EncodeMultiColumnHashValue(split_hash_code); return DoSplitTablet(source_tablet_info, split_encoded_key.ToStringBuffer(), split_partition_key); } Result<scoped_refptr<TabletInfo>> CatalogManager::GetTabletInfo(const TabletId& tablet_id) { LockGuard lock(mutex_); TRACE("Acquired catalog manager lock"); const auto tablet_info = FindPtrOrNull(*tablet_map_, tablet_id); SCHECK(tablet_info != nullptr, NotFound, Format("Tablet $0 not found", tablet_id)); return tablet_info; } void CatalogManager::SplitTabletWithKey( const scoped_refptr<TabletInfo>& tablet, const std::string& split_encoded_key, const std::string& split_partition_key) { // Note that DoSplitTablet() will trigger an async SplitTablet task, and will only return not OK() // if it failed to submit that task. In other words, any failures here are not retriable, and // success indicates that an async and automatically retrying task was submitted. 
auto s = DoSplitTablet(tablet, split_encoded_key, split_partition_key); WARN_NOT_OK(s, Format("Failed to split tablet with GetSplitKey result for tablet: $0", tablet->tablet_id())); if (!s.ok()) { tablet_split_manager_.RemoveFailedProcessingTabletSplit(tablet->tablet_id()); } } Status CatalogManager::SplitTablet(const TabletId& tablet_id) { LOG(INFO) << "Got tablet to split: " << tablet_id; const auto tablet = VERIFY_RESULT(GetTabletInfo(tablet_id)); VLOG(2) << "Scheduling GetSplitKey request to leader tserver for source tablet ID: " << tablet->tablet_id(); auto call = std::make_shared<AsyncGetTabletSplitKey>( master_, AsyncTaskPool(), tablet, [this, tablet](const Result<AsyncGetTabletSplitKey::Data>& result) { if (result.ok()) { SplitTabletWithKey(tablet, result->split_encoded_key, result->split_partition_key); } else { LOG(WARNING) << "AsyncGetTabletSplitKey task failed with status: " << result.status(); tablet_split_manager_.RemoveFailedProcessingTabletSplit(tablet->tablet_id()); } }); tablet->table()->AddTask(call); return ScheduleTask(call); } Status CatalogManager::SplitTablet( const SplitTabletRequestPB* req, SplitTabletResponsePB* resp, rpc::RpcContext* rpc) { const auto source_tablet_id = req->tablet_id(); const auto source_tablet_info = VERIFY_RESULT(GetTabletInfo(source_tablet_id)); const auto source_partition = source_tablet_info->LockForRead()->pb.partition(); const auto start_hash_code = source_partition.partition_key_start().empty() ? 0 : PartitionSchema::DecodeMultiColumnHashValue(source_partition.partition_key_start()); const auto end_hash_code = source_partition.partition_key_end().empty() ? 
std::numeric_limits<docdb::DocKeyHash>::max() : PartitionSchema::DecodeMultiColumnHashValue(source_partition.partition_key_end()); const auto split_hash_code = (start_hash_code + end_hash_code) / 2; return DoSplitTablet(source_tablet_info, split_hash_code); } Status CatalogManager::DeleteNotServingTablet( const DeleteNotServingTabletRequestPB* req, DeleteNotServingTabletResponsePB* resp, rpc::RpcContext* rpc) { const auto& tablet_id = req->tablet_id(); const auto tablet_info = VERIFY_RESULT(GetTabletInfo(tablet_id)); if (PREDICT_FALSE(FLAGS_TEST_reject_delete_not_serving_tablet_rpc)) { TEST_SYNC_POINT("CatalogManager::DeleteNotServingTablet:Reject"); return STATUS( InvalidArgument, "Rejecting due to FLAGS_TEST_reject_delete_not_serving_tablet_rpc"); } const auto& table_info = tablet_info->table(); RETURN_NOT_OK(CheckIfForbiddenToDeleteTabletOf(table_info)); RETURN_NOT_OK(CatalogManagerUtil::CheckIfCanDeleteSingleTablet(tablet_info)); auto schedules_to_tables_map = VERIFY_RESULT( MakeSnapshotSchedulesToObjectIdsMap(SysRowEntry::TABLE)); RepeatedBytes retained_by_snapshot_schedules; FillRetainedBySnapshotSchedules( schedules_to_tables_map, table_info->id(), &retained_by_snapshot_schedules); return DeleteTabletListAndSendRequests( { tablet_info }, "Not serving tablet deleted upon request at " + LocalTimeAsString(), retained_by_snapshot_schedules); } Status CatalogManager::DdlLog( const DdlLogRequestPB* req, DdlLogResponsePB* resp, rpc::RpcContext* rpc) { return sys_catalog_->FetchDdlLog(resp->mutable_entries()); } namespace { CHECKED_STATUS ValidateCreateTableSchema(const Schema& schema, CreateTableResponsePB* resp) { if (schema.num_key_columns() <= 0) { return SetupError(resp->mutable_error(), MasterErrorPB::INVALID_SCHEMA, STATUS(InvalidArgument, "Must specify at least one key column")); } for (int i = 0; i < schema.num_key_columns(); i++) { if (!IsTypeAllowableInKey(schema.column(i).type_info())) { return SetupError(resp->mutable_error(), 
MasterErrorPB::INVALID_SCHEMA, STATUS(InvalidArgument, "Invalid datatype for primary key column")); } } return Status::OK(); } } // namespace Status CatalogManager::CreateYsqlSysTable(const CreateTableRequestPB* req, CreateTableResponsePB* resp) { LOG(INFO) << "CreateYsqlSysTable: " << req->name(); // Lookup the namespace and verify if it exists. TRACE("Looking up namespace"); auto ns = VERIFY_RESULT(FindNamespace(req->namespace_())); const NamespaceId& namespace_id = ns->id(); const NamespaceName& namespace_name = ns->name(); Schema schema; RETURN_NOT_OK(SchemaFromPB(req->schema(), &schema)); // If the schema contains column ids, we are copying a Postgres table from one namespace to // another. Anyway, validate the schema. RETURN_NOT_OK(ValidateCreateTableSchema(schema, resp)); if (!schema.has_column_ids()) { schema.InitColumnIdsByDefault(); } schema.mutable_table_properties()->set_is_ysql_catalog_table(true); // Verify no hash partition schema is specified. if (req->partition_schema().has_hash_schema()) { return SetupError(resp->mutable_error(), MasterErrorPB::INVALID_SCHEMA, STATUS(InvalidArgument, "PostgreSQL system catalog tables are non-partitioned")); } if (req->table_type() != TableType::PGSQL_TABLE_TYPE) { return SetupError(resp->mutable_error(), MasterErrorPB::INVALID_SCHEMA, STATUS_FORMAT( InvalidArgument, "Expected table type to be PGSQL_TABLE_TYPE ($0), got $1 ($2)", PGSQL_TABLE_TYPE, TableType_Name(req->table_type()))); } // Create partition schema and one partition. PartitionSchema partition_schema; vector<Partition> partitions; RETURN_NOT_OK(partition_schema.CreatePartitions(1, &partitions)); // Create table info in memory. scoped_refptr<TableInfo> table; vector<TabletInfo*> tablets; scoped_refptr<TabletInfo> sys_catalog_tablet; { LockGuard lock(mutex_); TRACE("Acquired catalog manager lock"); // Verify that the table does not exist, or has been deleted. 
table = FindPtrOrNull(*table_ids_map_, req->table_id()); if (table != nullptr && !table->is_deleted()) { Status s = STATUS_SUBSTITUTE(AlreadyPresent, "YSQL table '$0.$1' (ID: $2) already exists", ns->name(), table->name(), table->id()); LOG(WARNING) << "Found table: " << table->ToStringWithState() << ". Failed creating YSQL system table with error: " << s.ToString() << " Request:\n" << req->DebugString(); // Technically, client already knows table ID, but we set it anyway for unified handling of // AlreadyPresent errors. See comment in CreateTable() resp->set_table_id(table->id()); return SetupError(resp->mutable_error(), MasterErrorPB::OBJECT_ALREADY_PRESENT, s); } RETURN_NOT_OK(CreateTableInMemory( *req, schema, partition_schema, false /* create_tablets */, namespace_id, namespace_name, partitions, nullptr /* index_info */, nullptr /* tablets */, resp, &table)); sys_catalog_tablet = tablet_map_->find(kSysCatalogTabletId)->second; } // Tables with a transaction should be rolled back if the transaction does not get committed. // Store this on the table persistent state until the transaction has been a verified success. 
TransactionMetadata txn; if (req->has_transaction() && FLAGS_enable_transactional_ddl_gc) { table->mutable_metadata()->mutable_dirty()->pb.mutable_transaction()-> CopyFrom(req->transaction()); txn = VERIFY_RESULT(TransactionMetadata::FromPB(req->transaction())); RSTATUS_DCHECK(!txn.status_tablet.empty(), Corruption, "Given incomplete Transaction"); } { auto tablet_lock = sys_catalog_tablet->LockForWrite(); tablet_lock.mutable_data()->pb.add_table_ids(table->id()); Status s = sys_catalog_->Upsert(leader_ready_term(), sys_catalog_tablet);; if (PREDICT_FALSE(!s.ok())) { return AbortTableCreation(table.get(), tablets, s.CloneAndPrepend( "An error occurred while inserting to sys-tablets: "), resp); } table->set_is_system(); table->AddTablet(sys_catalog_tablet.get()); tablet_lock.Commit(); } TRACE("Inserted new table info into CatalogManager maps"); // Update the on-disk table state to "running". table->mutable_metadata()->mutable_dirty()->pb.set_state(SysTablesEntryPB::RUNNING); Status s = sys_catalog_->Upsert(leader_ready_term(), table);; if (PREDICT_FALSE(!s.ok())) { return AbortTableCreation(table.get(), tablets, s.CloneAndPrepend( "An error occurred while inserting to sys-tablets: "), resp); } TRACE("Wrote table to system table"); // Commit the in-memory state. table->mutable_metadata()->CommitMutation(); // Verify Transaction gets committed, which occurs after table create finishes. 
if (req->has_transaction() && PREDICT_TRUE(FLAGS_enable_transactional_ddl_gc)) { LOG(INFO) << "Enqueuing table for Transaction Verification: " << req->name(); std::function<Status(bool)> when_done = std::bind(&CatalogManager::VerifyTablePgLayer, this, table, _1); WARN_NOT_OK(background_tasks_thread_pool_->SubmitFunc( std::bind(&YsqlTransactionDdl::VerifyTransaction, &ysql_transaction_, txn, when_done)), "Could not submit VerifyTransaction to thread pool"); } tserver::ChangeMetadataRequestPB change_req; change_req.set_tablet_id(kSysCatalogTabletId); auto& add_table = *change_req.mutable_add_table(); add_table.set_table_id(req->table_id()); add_table.set_table_type(TableType::PGSQL_TABLE_TYPE); add_table.set_table_name(req->name()); SchemaToPB(schema, add_table.mutable_schema()); add_table.set_schema_version(0); partition_schema.ToPB(add_table.mutable_partition_schema()); RETURN_NOT_OK(tablet::SyncReplicateChangeMetadataOperation( &change_req, sys_catalog_->tablet_peer().get(), leader_ready_term())); if (initial_snapshot_writer_) { initial_snapshot_writer_->AddMetadataChange(change_req); } return Status::OK(); } Status CatalogManager::ReservePgsqlOids(const ReservePgsqlOidsRequestPB* req, ReservePgsqlOidsResponsePB* resp, rpc::RpcContext* rpc) { VLOG(1) << "ReservePgsqlOids request: " << req->ShortDebugString(); // Lookup namespace scoped_refptr<NamespaceInfo> ns; { SharedLock lock(mutex_); ns = FindPtrOrNull(namespace_ids_map_, req->namespace_id()); } if (!ns) { return SetupError(resp->mutable_error(), MasterErrorPB::NAMESPACE_NOT_FOUND, STATUS(NotFound, "Namespace not found", req->namespace_id())); } // Reserve oids. 
auto l = ns->LockForWrite(); uint32_t begin_oid = l->pb.next_pg_oid(); if (begin_oid < req->next_oid()) { begin_oid = req->next_oid(); } if (begin_oid == std::numeric_limits<uint32_t>::max()) { LOG(WARNING) << Format("No more object identifier is available for Postgres database $0 ($1)", l->pb.name(), req->namespace_id()); return SetupError(resp->mutable_error(), MasterErrorPB::UNKNOWN_ERROR, STATUS(InvalidArgument, "No more object identifier is available")); } uint32_t end_oid = begin_oid + req->count(); if (end_oid < begin_oid) { end_oid = std::numeric_limits<uint32_t>::max(); // Handle wraparound. } resp->set_begin_oid(begin_oid); resp->set_end_oid(end_oid); l.mutable_data()->pb.set_next_pg_oid(end_oid); // Update the on-disk state. const Status s = sys_catalog_->Upsert(leader_ready_term(), ns);; if (!s.ok()) { return SetupError(resp->mutable_error(), MasterErrorPB::UNKNOWN_ERROR, s); } // Commit the in-memory state. l.Commit(); VLOG(1) << "ReservePgsqlOids response: " << resp->ShortDebugString(); return Status::OK(); } Status CatalogManager::GetYsqlCatalogConfig(const GetYsqlCatalogConfigRequestPB* req, GetYsqlCatalogConfigResponsePB* resp, rpc::RpcContext* rpc) { VLOG(1) << "GetYsqlCatalogConfig request: " << req->ShortDebugString(); auto l = CHECK_NOTNULL(ysql_catalog_config_.get())->LockForRead(); resp->set_version(l->pb.ysql_catalog_config().version()); return Status::OK(); } Status CatalogManager::CopyPgsqlSysTables(const NamespaceId& namespace_id, const std::vector<scoped_refptr<TableInfo>>& tables) { const uint32_t database_oid = CHECK_RESULT(GetPgsqlDatabaseOid(namespace_id)); vector<TableId> source_table_ids; vector<TableId> target_table_ids; for (const auto& table : tables) { CreateTableRequestPB table_req; CreateTableResponsePB table_resp; const uint32_t table_oid = VERIFY_RESULT(GetPgsqlTableOid(table->id())); const TableId table_id = GetPgsqlTableId(database_oid, table_oid); // Hold read lock until rows from the table are copied also. 
    auto l = table->LockForRead();

    // Skip shared table.
    if (l->pb.is_pg_shared_table()) {
      continue;
    }

    // Build a CreateTable request that mirrors the source table's name/schema, marked as a
    // PG catalog table under the target namespace.
    table_req.set_name(l->pb.name());
    table_req.mutable_namespace_()->set_id(namespace_id);
    table_req.set_table_type(PGSQL_TABLE_TYPE);
    table_req.mutable_schema()->CopyFrom(l->schema());
    table_req.set_is_pg_catalog_table(true);
    table_req.set_table_id(table_id);

    if (IsIndex(l->pb)) {
      // Re-scope the indexed table's id under the target database OID as well.
      const uint32_t indexed_table_oid =
          VERIFY_RESULT(GetPgsqlTableOid(GetIndexedTableId(l->pb)));
      const TableId indexed_table_id = GetPgsqlTableId(database_oid, indexed_table_oid);

      // Set index_info.
      // Previously created INDEX wouldn't have the attribute index_info.
      if (l->pb.has_index_info()) {
        table_req.mutable_index_info()->CopyFrom(l->pb.index_info());
        table_req.mutable_index_info()->set_indexed_table_id(indexed_table_id);
      }

      // Set deprecated field for index_info.
      table_req.set_indexed_table_id(indexed_table_id);
      table_req.set_is_local_index(PROTO_GET_IS_LOCAL(l->pb));
      table_req.set_is_unique_index(PROTO_GET_IS_UNIQUE(l->pb));
    }

    auto s = CreateYsqlSysTable(&table_req, &table_resp);
    if (!s.ok()) {
      return s.CloneAndPrepend(Substitute(
          "Failure when creating PGSQL System Tables: $0", table_resp.error().ShortDebugString()));
    }

    source_table_ids.push_back(table->id());
    target_table_ids.push_back(table_id);
  }
  // Bulk-copy the rows of all source tables into their corresponding target tables.
  RETURN_NOT_OK(
      sys_catalog_->CopyPgsqlTables(source_table_ids, target_table_ids, leader_ready_term()));
  return Status::OK();
}

// Create a new table.
// See README file in this directory for a description of the design.
Status CatalogManager::CreateTable(const CreateTableRequestPB* orig_req,
                                   CreateTableResponsePB* resp,
                                   rpc::RpcContext* rpc) {
  DVLOG(3) << __PRETTY_FUNCTION__ << " Begin. " << orig_req->DebugString();

  const bool is_pg_table = orig_req->table_type() == PGSQL_TABLE_TYPE;
  const bool is_pg_catalog_table = is_pg_table && orig_req->is_pg_catalog_table();
  // PG catalog table creations can be very chatty during initdb; optionally log only the name.
  if (!is_pg_catalog_table || !FLAGS_hide_pg_catalog_table_creation_logs) {
    LOG(INFO) << "CreateTable from " << RequestorString(rpc) << ":\n" << orig_req->DebugString();
  } else {
    LOG(INFO) << "CreateTable from " << RequestorString(rpc) << ": " << orig_req->name();
  }

  const bool is_transactional = orig_req->schema().table_properties().is_transactional();
  // If this is a transactional table, we need to create the transaction status table (if it does
  // not exist already).
  if (is_transactional && (!is_pg_catalog_table || !FLAGS_create_initial_sys_catalog_snapshot)) {
    Status s = CreateTransactionsStatusTableIfNeeded(rpc);
    if (!s.ok()) {
      return s.CloneAndPrepend("Error while creating transaction status table");
    }
  } else {
    VLOG(1) << "Not attempting to create a transaction status table:\n"
            << " " << EXPR_VALUE_FOR_LOG(is_transactional) << "\n "
            << " " << EXPR_VALUE_FOR_LOG(is_pg_catalog_table) << "\n "
            << " " << EXPR_VALUE_FOR_LOG(FLAGS_create_initial_sys_catalog_snapshot);
  }

  // PG catalog tables take a dedicated, simpler creation path.
  if (is_pg_catalog_table) {
    return CreateYsqlSysTable(orig_req, resp);
  }

  Status s;
  const char* const object_type = PROTO_PTR_IS_TABLE(orig_req) ? "table" : "index";

  // Copy the request, so we can fill in some defaults.
  CreateTableRequestPB req = *orig_req;

  // Lookup the namespace and verify if it exists.
TRACE("Looking up namespace"); auto ns = VERIFY_RESULT(FindNamespace(req.namespace_())); bool colocated; NamespaceId namespace_id; NamespaceName namespace_name; { auto ns_lock = ns->LockForRead(); if (ns->database_type() != GetDatabaseTypeForTable(req.table_type())) { Status s = STATUS(NotFound, "Namespace not found"); return SetupError(resp->mutable_error(), MasterErrorPB::NAMESPACE_NOT_FOUND, s); } namespace_id = ns->id(); namespace_name = ns->name(); colocated = ns->colocated(); } // For index table, find the table info scoped_refptr<TableInfo> indexed_table; if (IsIndex(req)) { TRACE("Looking up indexed table"); indexed_table = GetTableInfo(req.indexed_table_id()); if (indexed_table == nullptr) { return STATUS_SUBSTITUTE( NotFound, "The indexed table $0 does not exist", req.indexed_table_id()); } TRACE("Locking indexed table"); RETURN_NOT_OK(CheckIfTableDeletedOrNotVisibleToClient(indexed_table->LockForRead(), resp)); } // Determine if this table should be colocated. If not specified, the table should be colocated if // and only if the namespace is colocated. if (!req.colocated()) { // Opt out of colocation if the request says so. colocated = false; } else if (indexed_table && !indexed_table->colocated()) { // Opt out of colocation if the indexed table opted out of colocation. colocated = false; } // TODO: If this is a colocated index table in a colocated database, convert any hash partition // columns into range partition columns. This is because postgres does not know that this index // table is in a colocated database. When we get to the "tablespaces" step where we store this // into PG metadata, then PG will know if db/table is colocated and do the work there. if ((colocated || req.has_tablegroup_id()) && IsIndex(req)) { for (auto& col_pb : *req.mutable_schema()->mutable_columns()) { col_pb.set_is_hash_key(false); } } // Validate schema. 
Schema schema; RETURN_NOT_OK(SchemaFromPB(req.schema(), &schema)); RETURN_NOT_OK(ValidateCreateTableSchema(schema, resp)); // checking that referenced user-defined types (if any) exist. { SharedLock lock(mutex_); for (int i = 0; i < schema.num_columns(); i++) { for (const auto &udt_id : schema.column(i).type()->GetUserDefinedTypeIds()) { if (FindPtrOrNull(udtype_ids_map_, udt_id) == nullptr) { Status s = STATUS(InvalidArgument, "Referenced user-defined type not found"); return SetupError(resp->mutable_error(), MasterErrorPB::INVALID_SCHEMA, s); } } } } // TODO (ENG-1860) The referenced namespace and types retrieved/checked above could be deleted // some time between this point and table creation below. // Usually the column ids are available if it's called on the backup-restoring code path // (from CatalogManager::RecreateTable). Else the column ids must be empty in the client schema. if (!schema.has_column_ids()) { schema.InitColumnIdsByDefault(); } if (schema.table_properties().HasCopartitionTableId()) { return CreateCopartitionedTable(req, resp, rpc, schema, ns); } if (colocated || req.has_tablegroup_id()) { // If the table is colocated, then there should be no hash partition columns. // Do the same for tables that are being placed in tablegroups. if (schema.num_hash_key_columns() > 0) { Status s = STATUS(InvalidArgument, "Cannot colocate hash partitioned table"); return SetupError(resp->mutable_error(), MasterErrorPB::INVALID_SCHEMA, s); } } else if ( !req.partition_schema().has_hash_schema() && !req.partition_schema().has_range_schema()) { // If neither hash nor range schema have been specified by the protobuf request, we assume the // table uses a hash schema, and we use the table_type and hash_key to determine the hashing // scheme (redis or multi-column) that should be used. 
if (req.table_type() == REDIS_TABLE_TYPE) { req.mutable_partition_schema()->set_hash_schema(PartitionSchemaPB::REDIS_HASH_SCHEMA); } else if (schema.num_hash_key_columns() > 0) { req.mutable_partition_schema()->set_hash_schema(PartitionSchemaPB::MULTI_COLUMN_HASH_SCHEMA); } else { Status s = STATUS(InvalidArgument, "Unknown table type or partitioning method"); return SetupError(resp->mutable_error(), MasterErrorPB::INVALID_SCHEMA, s); } } // Verify that custom placement policy has not been specified for colocated table. const bool is_replication_info_set = IsReplicationInfoSet(req.replication_info()); if (is_replication_info_set && colocated) { Status s = STATUS(InvalidArgument, "Custom placement policy should not be set for " "colocated tables"); return SetupError(resp->mutable_error(), MasterErrorPB::INVALID_TABLE_REPLICATION_INFO, s); } if (is_replication_info_set && req.table_type() == PGSQL_TABLE_TYPE) { const Status s = STATUS(InvalidArgument, "Cannot set placement policy for YSQL tables " "use Tablespaces instead"); return SetupError(resp->mutable_error(), MasterErrorPB::INVALID_REQUEST, s); } // Get placement info. const ReplicationInfoPB& replication_info = VERIFY_RESULT( GetTableReplicationInfo(req.replication_info(), req.tablespace_id())); // Calculate number of tablets to be used. int num_tablets = req.schema().table_properties().num_tablets(); if (num_tablets <= 0) { num_tablets = req.num_tablets(); } if (num_tablets <= 0) { // Use default as client could have gotten the value before any tserver had heartbeated // to (a new) master leader. BlacklistSet blacklist = BlacklistSetFromPB(); TSDescriptorVector ts_descs; master_->ts_manager()->GetAllLiveDescriptorsInCluster( &ts_descs, replication_info.live_replicas().placement_uuid(), blacklist); num_tablets = ts_descs.size() * (is_pg_table ? 
FLAGS_ysql_num_shards_per_tserver : FLAGS_yb_num_shards_per_tserver); LOG(INFO) << "Setting default tablets to " << num_tablets << " with " << ts_descs.size() << " primary servers"; } // Create partitions. PartitionSchema partition_schema; vector<Partition> partitions; if (colocated || req.has_tablegroup_id()) { RETURN_NOT_OK(partition_schema.CreatePartitions(1, &partitions)); req.clear_partition_schema(); num_tablets = 1; } else { s = PartitionSchema::FromPB(req.partition_schema(), schema, &partition_schema); if (req.partitions_size() > 0) { if (req.partitions_size() != num_tablets) { Status s = STATUS(InvalidArgument, "Partitions are not defined for all tablets"); return SetupError(resp->mutable_error(), MasterErrorPB::INVALID_SCHEMA, s); } string last; for (const auto& p : req.partitions()) { Partition np; Partition::FromPB(p, &np); if (np.partition_key_start() != last) { Status s = STATUS(InvalidArgument, "Partitions does not cover the full partition keyspace"); return SetupError(resp->mutable_error(), MasterErrorPB::INVALID_SCHEMA, s); } last = np.partition_key_end(); partitions.push_back(std::move(np)); } } else { // Supplied number of partitions is merely a suggestion, actual number of // created partitions might differ. RETURN_NOT_OK(partition_schema.CreatePartitions(num_tablets, &partitions)); } // The vector 'partitions' contains real setup partitions, so the variable // should be updated. num_tablets = partitions.size(); } LOG(INFO) << "Set number of tablets: " << num_tablets; req.set_num_tablets(num_tablets); schema.mutable_table_properties()->SetNumTablets(num_tablets); // For index table, populate the index info. IndexInfoPB index_info; const bool index_backfill_enabled = IsIndexBackfillEnabled(orig_req->table_type(), is_transactional); if (req.has_index_info()) { // Current message format. index_info.CopyFrom(req.index_info()); // Assign column-ids that have just been computed and assigned to "index_info". 
    // YSQL manages its own column ids; only YCQL index columns need ids copied from the schema.
    if (!is_pg_table) {
      DCHECK_EQ(index_info.columns().size(), schema.num_columns())
          << "Number of columns are not the same between index_info and index_schema";
      for (int colidx = 0; colidx < schema.num_columns(); colidx++) {
        index_info.mutable_columns(colidx)->set_column_id(schema.column_id(colidx));
      }
    }
  } else if (req.has_indexed_table_id()) {
    // Old client message format when rolling upgrade (Not having "index_info").
    IndexInfoBuilder index_info_builder(&index_info);
    index_info_builder.ApplyProperties(req.indexed_table_id(),
        req.is_local_index(), req.is_unique_index());
    if (orig_req->table_type() != PGSQL_TABLE_TYPE) {
      Schema indexed_schema;
      RETURN_NOT_OK(indexed_table->GetSchema(&indexed_schema));
      RETURN_NOT_OK(index_info_builder.ApplyColumnMapping(indexed_schema, schema));
    }
  }

  if ((req.has_index_info() || req.has_indexed_table_id()) &&
      index_backfill_enabled &&
      !req.skip_index_backfill()) {
    // Start off the index table with major compactions disabled. We need this to retain the delete
    // markers until the backfill process is completed.  No need to set index_permissions in the
    // index table.
    schema.SetRetainDeleteMarkers(true);
  }

  LOG(INFO) << "CreateTable with IndexInfo " << AsString(index_info);
  TSDescriptorVector all_ts_descs;
  master_->ts_manager()->GetAllLiveDescriptors(&all_ts_descs);
  // Validate the replication/placement policy against the current live tserver set.
  s = CheckValidReplicationInfo(replication_info, all_ts_descs, partitions, resp);
  if (!s.ok()) {
    return s;
  }

  scoped_refptr<TableInfo> table;
  vector<TabletInfo*> tablets;
  bool tablets_exist;
  bool tablegroup_tablets_exist = false;

  {
    LockGuard lock(mutex_);
    auto ns_lock = ns->LockForRead();
    TRACE("Acquired catalog manager lock");

    // For a colocated database, the shared tablet may already exist from an earlier create.
    tablets_exist =
        colocated && colocated_tablet_ids_map_.find(ns->id()) != colocated_tablet_ids_map_.end();
    // Verify that the table does not exist.
    table = FindPtrOrNull(table_names_map_, {namespace_id, req.name()});

    if (table != nullptr) {
      s = STATUS_SUBSTITUTE(AlreadyPresent,
              "Object '$0.$1' already exists", ns->name(), table->name());
      LOG(WARNING) << "Found table: " << table->ToStringWithState()
                   << ". Failed creating table with error: "
                   << s.ToString() << " Request:\n" << orig_req->DebugString();
      // If the table already exists, we set the response table_id field to the id of the table that
      // already exists. This is necessary because before we return the error to the client (or
      // success in case of a "CREATE TABLE IF NOT EXISTS" request) we want to wait for the existing
      // table to be available to receive requests. And we need the table id for that.
      resp->set_table_id(table->id());
      return SetupError(resp->mutable_error(), MasterErrorPB::OBJECT_ALREADY_PRESENT, s);
    }

    // Namespace state validity check:
    // 1. Allow Namespaces that are RUNNING
    // 2. Allow Namespaces that are PREPARING under 2 situations
    //    2a. System Namespaces.
    //    2b. The parent table from a Colocated Namespace.
    const auto parent_table_name = ns->id() + kColocatedParentTableNameSuffix;
    bool valid_ns_state = (ns->state() == SysNamespaceEntryPB::RUNNING) ||
      (ns->state() == SysNamespaceEntryPB::PREPARING &&
        (ns->name() == kSystemNamespaceName || req.name() == parent_table_name));
    if (!valid_ns_state) {
      Status s = STATUS_SUBSTITUTE(TryAgain, "Invalid Namespace State ($0). Cannot create $1.$2",
          SysNamespaceEntryPB::State_Name(ns->state()), ns->name(), req.name());
      return SetupError(resp->mutable_error(), NamespaceMasterError(ns->state()), s);
    }

    // Check whether this CREATE TABLE request which has a tablegroup_id is for a normal user table
    // or the request to create the parent table for the tablegroup. This is done by checking the
    // catalog manager maps.
    if (req.has_tablegroup_id() &&
        tablegroup_tablet_ids_map_.find(ns->id()) != tablegroup_tablet_ids_map_.end() &&
        tablegroup_tablet_ids_map_[ns->id()].find(req.tablegroup_id()) !=
            tablegroup_tablet_ids_map_[ns->id()].end()) {
      tablegroup_tablets_exist = true;
    }

    // Create the in-memory TableInfo (and TabletInfos only when no pre-existing shared tablet
    // will be reused).
    RETURN_NOT_OK(CreateTableInMemory(
        req, schema, partition_schema,
        !tablets_exist && !tablegroup_tablets_exist /* create_tablets */,
        namespace_id, namespace_name, partitions, &index_info, &tablets, resp, &table));

    // Section is executed when a table is either the parent table or a user table in a tablegroup.
    // It additionally sets the table metadata (and tablet metadata if this is the parent table)
    // to have the colocated property so we can take advantage of code reuse.
    if (req.has_tablegroup_id()) {
      table->mutable_metadata()->mutable_dirty()->pb.set_colocated(true);
      if (tablegroup_tablets_exist) {
        // If the table is not a tablegroup parent table, it performs a lookup for the proper tablet
        // to place the table on as a child table.
        scoped_refptr<TabletInfo> tablet =
            tablegroup_tablet_ids_map_[ns->id()][req.tablegroup_id()];
        RSTATUS_DCHECK(
            tablet->colocated(), InternalError, "The tablet for tablegroup should be colocated.");
        tablets.push_back(tablet.get());
        // Persist the table's membership on the shared tablet before committing the lock.
        auto tablet_lock = tablet->LockForWrite();
        tablet_lock.mutable_data()->pb.add_table_ids(table->id());
        RETURN_NOT_OK(sys_catalog_->Upsert(leader_ready_term(), tablet));
        tablet_lock.Commit();

        tablet->mutable_metadata()->StartMutation();
        table->AddTablets(tablets);
        tablegroup_ids_map_[req.tablegroup_id()]->AddChildTable(table->id());
      } else {
        // If the table is a tablegroup parent table, it creates a dummy tablet for the tablegroup
        // along with updating the catalog manager maps.
        RSTATUS_DCHECK_EQ(
            tablets.size(), 1, InternalError,
            "Only one tablet should be created for each tablegroup");
        tablets[0]->mutable_metadata()->mutable_dirty()->pb.set_colocated(true);
        // Update catalog manager maps for tablegroups
        tablegroup_tablet_ids_map_[ns->id()][req.tablegroup_id()] =
            tablet_map_->find(tablets[0]->id())->second;
      }
    } else if (colocated) {
      table->mutable_metadata()->mutable_dirty()->pb.set_colocated(true);
      // if the tablet already exists, add the tablet to tablets
      if (tablets_exist) {
        scoped_refptr<TabletInfo> tablet = colocated_tablet_ids_map_[ns->id()];
        RSTATUS_DCHECK(
            tablet->colocated(), InternalError,
            "The tablet for colocated database should be colocated.");
        tablets.push_back(tablet.get());
        // Persist the table's membership on the shared colocated tablet.
        auto tablet_lock = tablet->LockForWrite();
        tablet_lock.mutable_data()->pb.add_table_ids(table->id());
        RETURN_NOT_OK(sys_catalog_->Upsert(leader_ready_term(), tablet));
        tablet_lock.Commit();

        tablet->mutable_metadata()->StartMutation();
        table->AddTablets(tablets);
      } else {
        // Record the tablet
        RSTATUS_DCHECK_EQ(
            tablets.size(), 1, InternalError,
            "Only one tablet should be created for each colocated database");
        tablets[0]->mutable_metadata()->mutable_dirty()->pb.set_colocated(true);
        colocated_tablet_ids_map_[ns->id()] = tablet_map_->find(tablets[0]->id())->second;
      }
    }
  }

  // Tables with a transaction should be rolled back if the transaction does not get committed.
  // Store this on the table persistent state until the transaction has been a verified success.
  // Stash the DDL transaction metadata on the table entry so VerifyTablePgLayer can later
  // confirm the transaction committed (or roll the table back).
  TransactionMetadata txn;
  if (req.has_transaction() && FLAGS_enable_transactional_ddl_gc) {
    table->mutable_metadata()->mutable_dirty()->pb.mutable_transaction()->
        CopyFrom(req.transaction());
    txn = VERIFY_RESULT(TransactionMetadata::FromPB(req.transaction()));
    RSTATUS_DCHECK(!txn.status_tablet.empty(), Corruption, "Given incomplete Transaction");
  }

  // Test-only hook: artificially delay table creation (never for the txn status table itself).
  if (PREDICT_FALSE(FLAGS_TEST_simulate_slow_table_create_secs > 0) &&
      req.table_type() != TableType::TRANSACTION_STATUS_TABLE_TYPE) {
    LOG(INFO) << "Simulating slow table creation";
    SleepFor(MonoDelta::FromSeconds(FLAGS_TEST_simulate_slow_table_create_secs));
  }

  // NOTE: the table and tablets are already locked for write at this point,
  // since the CreateTableInfo/CreateTabletInfo functions leave them in that state.
  // They will get committed at the end of this function.
  // Sanity check: the tables and tablets should all be in "preparing" state.
  CHECK_EQ(SysTablesEntryPB::PREPARING, table->metadata().dirty().pb.state());
  // Update the on-disk table state to "running".
  table->mutable_metadata()->mutable_dirty()->pb.set_state(SysTablesEntryPB::RUNNING);
  TRACE("Inserted new table and tablet info into CatalogManager maps");
  VLOG_WITH_PREFIX(1) << "Inserted new table and tablet info into CatalogManager maps";

  if (!tablets_exist && !tablegroup_tablets_exist) {
    // Write Tablets to sys-tablets (in "preparing" state).
for (const TabletInfo *tablet : tablets) { CHECK_EQ(SysTabletsEntryPB::PREPARING, tablet->metadata().dirty().pb.state()); } } s = sys_catalog_->Upsert(leader_ready_term(), table, tablets); if (PREDICT_FALSE(!s.ok())) { return AbortTableCreation(table.get(), tablets, s.CloneAndPrepend( Substitute("An error occurred while inserting to sys-tablets: $0", s.ToString())), resp); } TRACE("Wrote tablets to system table"); s = sys_catalog_->Upsert(leader_ready_term(), table);; if (PREDICT_FALSE(!s.ok())) { return AbortTableCreation(table.get(), tablets, s.CloneAndPrepend( Substitute("An error occurred while inserting to sys-tablets: $0", s.ToString())), resp); } TRACE("Wrote table to system table"); // For index table, insert index info in the indexed table. if ((req.has_index_info() || req.has_indexed_table_id())) { if (index_backfill_enabled && !req.skip_index_backfill()) { if (is_pg_table) { // YSQL: start at some permission before backfill. The real enforcement happens with // pg_index system table's indislive and indisready columns. Choose WRITE_AND_DELETE // because it will probably be less confusing. index_info.set_index_permissions(INDEX_PERM_WRITE_AND_DELETE); } else { // YCQL index_info.set_index_permissions(INDEX_PERM_DELETE_ONLY); } } s = AddIndexInfoToTable(indexed_table, index_info, resp); if (PREDICT_FALSE(!s.ok())) { return AbortTableCreation(table.get(), tablets, s.CloneAndPrepend( Substitute("An error occurred while inserting index info: $0", s.ToString())), resp); } } // Commit the in-memory state. 
  // Publish the in-memory state now that everything is durably written.
  table->mutable_metadata()->CommitMutation();

  for (TabletInfo *tablet : tablets) {
    tablet->mutable_metadata()->CommitMutation();
  }

  // Tables placed on a pre-existing shared tablet must also be registered with the tserver
  // hosting that tablet via an async AddTableToTablet task.
  if ((colocated && tablets_exist) || (req.has_tablegroup_id() && tablegroup_tablets_exist)) {
    auto call =
        std::make_shared<AsyncAddTableToTablet>(master_, AsyncTaskPool(), tablets[0], table);
    table->AddTask(call);
    WARN_NOT_OK(ScheduleTask(call), "Failed to send AddTableToTablet request");
  }

  // Grant the creator role full permissions on the new table (YCQL auth).
  if (req.has_creator_role_name()) {
    const NamespaceName& keyspace_name = req.namespace_().name();
    const TableName& table_name = req.name();
    RETURN_NOT_OK(permissions_manager_->GrantPermissions(
        req.creator_role_name(),
        get_canonical_table(keyspace_name, table_name),
        table_name,
        keyspace_name,
        all_permissions_for_resource(ResourceType::TABLE),
        ResourceType::TABLE,
        resp));
  }

  // Verify Transaction gets committed, which occurs after table create finishes.
  if (req.has_transaction() && PREDICT_TRUE(FLAGS_enable_transactional_ddl_gc)) {
    LOG(INFO) << "Enqueuing table for Transaction Verification: " << req.name();
    std::function<Status(bool)> when_done =
        std::bind(&CatalogManager::VerifyTablePgLayer, this, table, _1);
    WARN_NOT_OK(background_tasks_thread_pool_->SubmitFunc(
        std::bind(&YsqlTransactionDdl::VerifyTransaction, &ysql_transaction_, txn, when_done)),
        "Could not submit VerifyTransaction to thread pool");
  }

  LOG(INFO) << "Successfully created " << object_type << " " << table->ToString()
            << " in " << ns->ToString() << " per request from " << RequestorString(rpc);
  // Wake the background task to start assigning the new tablets.
  background_tasks_->Wake();

  // Lazily create the metrics snapshots table (avoiding recursion when creating it itself).
  if (FLAGS_master_enable_metrics_snapshotter &&
      !(req.table_type() == TableType::YQL_TABLE_TYPE &&
        namespace_id == kSystemNamespaceId &&
        req.name() == kMetricsSnapshotsTableName)) {
    Status s = CreateMetricsSnapshotsTableIfNeeded(rpc);
    if (!s.ok()) {
      return s.CloneAndPrepend("Error while creating metrics snapshots table");
    }
  }

  DVLOG(3) << __PRETTY_FUNCTION__ << " Done.";
  return Status::OK();
}

// Callback run after a transactional DDL's transaction resolves: if the pg_class entry for the
// table exists (or the verification RPC failed un-retryably), the stored transaction metadata is
// cleared; otherwise the table is deleted asynchronously.
Status CatalogManager::VerifyTablePgLayer(scoped_refptr<TableInfo> table, bool rpc_success) {
  // Upon Transaction completion, check pg system table using OID to ensure SUCCESS.
  const uint32_t database_oid = VERIFY_RESULT(GetPgsqlDatabaseOidByTableId(table->id()));
  const auto pg_table_id = GetPgsqlTableId(database_oid, kPgClassTableOid);

  auto entry_exists = VERIFY_RESULT(
      ysql_transaction_.PgEntryExists(pg_table_id, GetPgsqlTableOid(table->id())));
  auto l = table->LockForWrite();
  auto& metadata = table->mutable_metadata()->mutable_dirty()->pb;

  // Only RUNNING/ALTERING tables are eligible; anything else means the table moved on.
  SCHECK(metadata.state() == SysTablesEntryPB::RUNNING ||
         metadata.state() == SysTablesEntryPB::ALTERING, Aborted,
         Substitute("Unexpected table state ($0), abandoning transaction GC work for $1",
                    SysTablesEntryPB_State_Name(metadata.state()), table->ToString()));

  // #5981: Mark un-retryable rpc failures as pass to avoid infinite retry of GC'd txns.
  const bool txn_check_passed = entry_exists || !rpc_success;

  if (txn_check_passed) {
    // Remove the transaction from the entry since we're done processing it.
    metadata.clear_transaction();
    RETURN_NOT_OK(sys_catalog_->Upsert(leader_ready_term(), table));
    if (entry_exists) {
      LOG_WITH_PREFIX(INFO) << "Table transaction succeeded: " << table->ToString();
    } else {
      LOG_WITH_PREFIX(WARNING)
          << "Unknown RPC failure, removing transaction on table: " << table->ToString();
    }
    // Commit the in-memory state.
    l.Commit();
  } else {
    LOG(INFO) << "Table transaction failed, deleting: " << table->ToString();
    // Async enqueue delete.
    // Transaction verification failed: enqueue an async DeleteTable for the
    // half-created table on the background thread pool (failure is logged,
    // not fatal, via WARN_NOT_OK).
    DeleteTableRequestPB del_tbl_req;
    del_tbl_req.mutable_table()->set_table_name(table->name());
    del_tbl_req.mutable_table()->set_table_id(table->id());
    del_tbl_req.set_is_index_table(table->is_index());

    RETURN_NOT_OK(background_tasks_thread_pool_->SubmitFunc( [this, del_tbl_req]() {
      DeleteTableResponsePB del_tbl_resp;
      WARN_NOT_OK(DeleteTable(&del_tbl_req, &del_tbl_resp, nullptr),
                  "Failed to Delete Table with failed transaction");
    }));
  }
  return Status::OK();
}

// Builds one TabletInfo (state PREPARING) per partition and registers the new
// tablets with both the table and the catalog manager's in-memory tablet map.
// The sys catalog is not written here; tablets are persisted later during
// assignment.
Status CatalogManager::CreateTabletsFromTable(const vector<Partition>& partitions,
                                              const scoped_refptr<TableInfo>& table,
                                              std::vector<TabletInfo*>* tablets) {
  // Create the TabletInfo objects in state PREPARING.
  for (const Partition& partition : partitions) {
    PartitionPB partition_pb;
    partition.ToPB(&partition_pb);
    tablets->push_back(CreateTabletInfo(table.get(), partition_pb));
  }

  // Add the table/tablets to the in-memory map for the assignment.
  table->AddTablets(*tablets);
  auto tablet_map_checkout = tablet_map_.CheckOut();
  for (TabletInfo* tablet : *tablets) {
    // InsertOrDie: a duplicate tablet id here is a programming error.
    InsertOrDie(tablet_map_checkout.get_ptr(), tablet->tablet_id(), tablet);
  }

  return Status::OK();
}

// Returns the replication factor from the placement info when explicitly set
// (> 0), otherwise falls back to the --replication_factor gflag.
int CatalogManager::GetNumReplicasFromPlacementInfo(const PlacementInfoPB& placement_info) {
  return placement_info.num_replicas() > 0 ?
      placement_info.num_replicas() : FLAGS_replication_factor;
}

// Validates the live-replica placement of a full replication config.  Thin
// wrapper over CheckValidPlacementInfo (which performs the actual checks).
Status CatalogManager::CheckValidReplicationInfo(const ReplicationInfoPB& replication_info,
                                                 const TSDescriptorVector& all_ts_descs,
                                                 const vector<Partition>& partitions,
                                                 CreateTableResponsePB* resp) {
  return CheckValidPlacementInfo(replication_info.live_replicas(), all_ts_descs, partitions,
                                 resp);
}

// Sanity-checks a placement spec against the set of live tablet servers:
//  1. total tablet count vs. --max_create_tablets_per_ts (skipped for RF=1),
//  2. replication factor vs. number of live tservers,
//  3. per-placement-block constraints (cloud info present, minimum sum),
//  4. existence of enough matching tservers per block and in total.
// On failure, fills resp->error via SetupError and returns a non-OK status.
Status CatalogManager::CheckValidPlacementInfo(const PlacementInfoPB& placement_info,
                                               const TSDescriptorVector& ts_descs,
                                               const vector<Partition>& partitions,
                                               CreateTableResponsePB* resp) {
  // Verify that the total number of tablets is reasonable, relative to the number
  // of live tablet servers.
  int num_live_tservers = ts_descs.size();
  int num_replicas = GetNumReplicasFromPlacementInfo(placement_info);
  int max_tablets = FLAGS_max_create_tablets_per_ts * num_live_tservers;
  Status s;
  string msg;
  // NOTE(review): partitions.size() (size_t) is compared against max_tablets
  // (int); the max_tablets > 0 guard keeps the implicit conversion safe here,
  // but confirm this stays true if the flag semantics change.
  if (num_replicas > 1 && max_tablets > 0 && partitions.size() > max_tablets) {
    msg = Substitute("The requested number of tablets ($0) is over the permitted maximum ($1)",
                     partitions.size(), max_tablets);
    s = STATUS(InvalidArgument, msg);
    LOG(WARNING) << msg;
    return SetupError(resp->mutable_error(), MasterErrorPB::TOO_MANY_TABLETS, s);
  }

  // Verify that the number of replicas isn't larger than the number of live tablet
  // servers.
  if (FLAGS_catalog_manager_check_ts_count_for_create_table &&
      num_replicas > num_live_tservers) {
    msg = Substitute("Not enough live tablet servers to create table with replication factor $0. "
                     "$1 tablet servers are alive.", num_replicas, num_live_tservers);
    LOG(WARNING) << msg
                 << ". Placement info: " << placement_info.ShortDebugString()
                 << ", replication factor flag: " << FLAGS_replication_factor;
    s = STATUS(InvalidArgument, msg);
    return SetupError(resp->mutable_error(), MasterErrorPB::REPLICATION_FACTOR_TOO_HIGH, s);
  }

  // Verify that placement requests are reasonable and we can satisfy the minimums.
  if (!placement_info.placement_blocks().empty()) {
    int minimum_sum = 0;
    for (const auto& pb : placement_info.placement_blocks()) {
      minimum_sum += pb.min_num_replicas();
      if (!pb.has_cloud_info()) {
        msg = Substitute("Got placement info without cloud info set: $0", pb.ShortDebugString());
        s = STATUS(InvalidArgument, msg);
        LOG(WARNING) << msg;
        return SetupError(resp->mutable_error(), MasterErrorPB::INVALID_SCHEMA, s);
      }
    }

    // The per-block minimums must be satisfiable within num_replicas.
    if (minimum_sum > num_replicas) {
      msg = Substitute("Sum of minimum replicas per placement ($0) is greater than num_replicas "
                       " ($1)", minimum_sum, num_replicas);
      s = STATUS(InvalidArgument, msg);
      LOG(WARNING) << msg;
      return SetupError(resp->mutable_error(), MasterErrorPB::INVALID_SCHEMA, s);
    }

    if (!FLAGS_TEST_skip_placement_validation_createtable_api) {
      // Loop through placements and verify that there are sufficient TServers to satisfy the
      // minimum required replicas.
      for (const auto& pb : placement_info.placement_blocks()) {
        RETURN_NOT_OK(FindTServersForPlacementBlock(pb, ts_descs));
      }

      // Verify that there are enough TServers to match the total required replication factor (which
      // could be more than the sum of the minimums).
      RETURN_NOT_OK(FindTServersForPlacementInfo(placement_info, ts_descs));
    }
  }
  return Status::OK();
}

// Creates the TableInfo (and, when create_tablets is true, its tablets) and
// registers it in the catalog manager's in-memory maps.  Nothing is written to
// the sys catalog here; persistence happens later in the CreateTable flow.
// Postgres tables are deliberately kept out of the name map (names are not
// unique per namespace in YSQL).
Status CatalogManager::CreateTableInMemory(const CreateTableRequestPB& req,
                                           const Schema& schema,
                                           const PartitionSchema& partition_schema,
                                           const bool create_tablets,
                                           const NamespaceId& namespace_id,
                                           const NamespaceName& namespace_name,
                                           const std::vector<Partition>& partitions,
                                           IndexInfoPB* index_info,
                                           std::vector<TabletInfo*>* tablets,
                                           CreateTableResponsePB* resp,
                                           scoped_refptr<TableInfo>* table) {
  // Add the new table in "preparing" state.
  *table = CreateTableInfo(req, schema, partition_schema, namespace_id, namespace_name,
                           index_info);
  const TableId& table_id = (*table)->id();

  VLOG_WITH_PREFIX_AND_FUNC(2)
      << "Table: " << (**table).ToString() << ", create_tablets: " << create_tablets;

  auto table_ids_map_checkout = table_ids_map_.CheckOut();
  (*table_ids_map_checkout)[table_id] = *table;
  // Do not add Postgres tables to the name map as the table name is not unique in a namespace.
  if (req.table_type() != PGSQL_TABLE_TYPE) {
    table_names_map_[{namespace_id, req.name()}] = *table;
  }

  if (create_tablets) {
    RETURN_NOT_OK(CreateTabletsFromTable(partitions, *table, tablets));
  }

  if (resp != nullptr) {
    resp->set_table_id(table_id);
  }

  HandleNewTableId(table_id);

  return Status::OK();
}

// Maps a FindTable result to a tri-state: true (found), false (NotFound with
// OBJECT_NOT_FOUND master error), or propagates any other error status.
Result<bool> DoesTableExist(const Result<TableInfoPtr>& result) {
  if (result.ok()) {
    return true;
  }
  if (result.status().IsNotFound()
      && MasterError(result.status()) == MasterErrorPB::OBJECT_NOT_FOUND) {
    return false;
  }
  return result.status();
}

// Returns whether a table with the given name exists in the given namespace
// (lookup by name, so not usable for YSQL tables — see FindTableUnlocked).
Result<bool> CatalogManager::TableExists(
    const std::string& namespace_name, const std::string& table_name) const {
  TableIdentifierPB table_id_pb;
  table_id_pb.set_table_name(table_name);
  table_id_pb.mutable_namespace_()->set_name(namespace_name);
  return DoesTableExist(FindTable(table_id_pb));
}

// Lazily creates the global transaction status table in the system namespace.
// Safe to race with concurrent creators: an AlreadyPresent error from
// CreateTable is swallowed below.
Status CatalogManager::CreateTransactionsStatusTableIfNeeded(rpc::RpcContext *rpc) {
  if (VERIFY_RESULT(TableExists(kSystemNamespaceName, kTransactionsTableName))) {
    VLOG(1) << "Transaction status table already exists, not creating.";
    return Status::OK();
  }

  LOG(INFO) << "Creating the transaction status table";
  // Set up a CreateTable request internally.
  CreateTableRequestPB req;
  CreateTableResponsePB resp;
  req.set_name(kTransactionsTableName);
  req.mutable_namespace_()->set_name(kSystemNamespaceName);
  req.set_table_type(TableType::TRANSACTION_STATUS_TABLE_TYPE);

  // Explicitly set the number tablets if the corresponding flag is set, otherwise CreateTable
  // will use the same defaults as for regular tables.
  if (FLAGS_transaction_table_num_tablets > 0) {
    req.mutable_schema()->mutable_table_properties()->set_num_tablets(
        FLAGS_transaction_table_num_tablets);
    req.set_num_tablets(FLAGS_transaction_table_num_tablets);
  }

  // Single hash-key binary column; the transaction status table needs no
  // user-visible schema beyond the key.
  ColumnSchema hash(kRedisKeyColumnName, BINARY, /* is_nullable */ false, /* is_hash_key */ true);
  ColumnSchemaToPB(hash, req.mutable_schema()->mutable_columns()->Add());

  Status s = CreateTable(&req, &resp, rpc);
  // We do not lock here so it is technically possible that the table was already created.
  // If so, there is nothing to do so we just ignore the "AlreadyPresent" error.
  if (!s.ok() && !s.IsAlreadyPresent()) {
    return s;
  }
  return Status::OK();
}

// Lazily creates the metrics snapshots table in the system namespace.  Like
// the transaction status table above, a concurrent "AlreadyPresent" result is
// treated as success.
Status CatalogManager::CreateMetricsSnapshotsTableIfNeeded(rpc::RpcContext *rpc) {
  if (VERIFY_RESULT(TableExists(kSystemNamespaceName, kMetricsSnapshotsTableName))) {
    return Status::OK();
  }

  // Set up a CreateTable request internally.
  CreateTableRequestPB req;
  CreateTableResponsePB resp;
  req.set_name(kMetricsSnapshotsTableName);
  req.mutable_namespace_()->set_name(kSystemNamespaceName);
  req.set_table_type(TableType::YQL_TABLE_TYPE);

  // Explicitly set the number tablets if the corresponding flag is set, otherwise CreateTable
  // will use the same defaults as for regular tables.
  if (FLAGS_metrics_snapshots_table_num_tablets > 0) {
    req.mutable_schema()->mutable_table_properties()->set_num_tablets(
        FLAGS_metrics_snapshots_table_num_tablets);
    req.set_num_tablets(FLAGS_metrics_snapshots_table_num_tablets);
  }

  // Schema description: "node" refers to tserver uuid. "entity_type" can be either
  // "tserver" or "table". "entity_id" is uuid of corresponding tserver or table.
  // "metric" is the name of the metric and "value" is its val. "ts" is time at
  // which the snapshot was recorded. "details" is a json column for future extensibility.
  YBSchemaBuilder schemaBuilder;
  schemaBuilder.AddColumn("node")->Type(STRING)->HashPrimaryKey()->NotNull();
  schemaBuilder.AddColumn("entity_type")->Type(STRING)->PrimaryKey()->NotNull();
  schemaBuilder.AddColumn("entity_id")->Type(STRING)->PrimaryKey()->NotNull();
  schemaBuilder.AddColumn("metric")->Type(STRING)->PrimaryKey()->NotNull();
  // "ts" sorted descending so the newest snapshot per key is read first.
  schemaBuilder.AddColumn("ts")->Type(TIMESTAMP)->PrimaryKey()->NotNull()->
      SetSortingType(ColumnSchema::SortingType::kDescending);
  schemaBuilder.AddColumn("value")->Type(INT64);
  schemaBuilder.AddColumn("details")->Type(JSONB);

  YBSchema ybschema;
  CHECK_OK(schemaBuilder.Build(&ybschema));

  auto schema = yb::client::internal::GetSchema(ybschema);
  SchemaToPB(schema, req.mutable_schema());

  Status s = CreateTable(&req, &resp, rpc);
  // We do not lock here so it is technically possible that the table was already created.
  // If so, there is nothing to do so we just ignore the "AlreadyPresent" error.
  if (s.IsAlreadyPresent()) {
    return Status::OK();
  }
  return s;
}

// Reports whether a previously issued CreateTable has fully completed,
// including dependent work: index visibility in the indexed table's schema,
// transaction status / metrics snapshots table creation, and pending
// colocated-tablet tasks (steps 1-4 and the checks that follow below).
Status CatalogManager::IsCreateTableDone(const IsCreateTableDoneRequestPB* req,
                                         IsCreateTableDoneResponsePB* resp) {
  TRACE("Looking up table");
  // 1. Lookup the table and verify if it exists.
  scoped_refptr<TableInfo> table = VERIFY_RESULT(FindTable(req->table()));

  TRACE("Locking table");
  auto l = table->LockForRead();
  RETURN_NOT_OK(CheckIfTableDeletedOrNotVisibleToClient(l, resp));
  const auto& pb = l->pb;

  // 2. Verify if the create is in-progress.
  TRACE("Verify if the table creation is in progress for $0", table->ToString());
  resp->set_done(!table->IsCreateInProgress());

  // 3. Set any current errors, if we are experiencing issues creating the table. This will be
  // bubbled up to the MasterService layer. If it is an error, it gets wrapped around in
  // MasterErrorPB::UNKNOWN_ERROR.
  RETURN_NOT_OK(table->GetCreateTableErrorStatus());

  // 4. If this is an index, we are not done until the index is in the indexed table's schema.
  // An exception is YSQL system table indexes, which don't get added to their indexed tables'
  // schemas.
  if (resp->done() && IsIndex(pb)) {
    auto& indexed_table_id = GetIndexedTableId(pb);
    // For user indexes (which add index info to indexed table's schema),
    // - if this index is created without backfill,
    //   - waiting for the index to be in the indexed table's schema is sufficient, and, by that
    //     point, things are fully created.
    // - if this index is created with backfill
    //   - and it's YCQL,
    //     - waiting for the index to be in the indexed table's schema means waiting for the
    //       DELETE_ONLY index permission, and it's fine to return to the client before the index
    //       gets the rest of the permissions because the expectation is that backfill will be
    //       completed asynchronously.
    //   - and it's YSQL,
    //     - waiting for the index to be in the indexed table's schema means just that (DocDB index
    //       permissions don't really matter for YSQL besides being used for backfill purposes), and
    //       it's a signal for postgres to continue the index backfill process, activating index
    //       state flags then later triggering backfill and so on.
    // For YSQL system indexes (which don't add index info to indexed table's schema),
    // - there's nothing additional to wait on.
    // Therefore, the only thing needed here is to check whether the index info is in the indexed
    // table's schema for user indexes.
    if (pb.table_type() == YQL_TABLE_TYPE ||
        (pb.table_type() == PGSQL_TABLE_TYPE && IsUserCreatedTable(*table))) {
      GetTableSchemaRequestPB get_schema_req;
      GetTableSchemaResponsePB get_schema_resp;
      get_schema_req.mutable_table()->set_table_id(indexed_table_id);
      const bool get_fully_applied_indexes = true;
      const Status s = GetTableSchemaInternal(&get_schema_req,
                                              &get_schema_resp,
                                              get_fully_applied_indexes);
      if (!s.ok()) {
        resp->mutable_error()->Swap(get_schema_resp.mutable_error());
        return s;
      }

      // Done only once this index shows up in the indexed table's index list.
      resp->set_done(false);
      for (const auto& index : get_schema_resp.indexes()) {
        if (index.has_table_id() && index.table_id() == table->id()) {
          resp->set_done(true);
          break;
        }
      }
    }
  }

  // If this is a transactional table we are not done until the transaction status table is created.
  // However, if we are currently initializing the system catalog snapshot, we don't create the
  // transactions table.
  if (!FLAGS_create_initial_sys_catalog_snapshot &&
      resp->done() && pb.schema().table_properties().is_transactional()) {
    RETURN_NOT_OK(IsTransactionStatusTableCreated(resp));
  }

  // We are not done until the metrics snapshots table is created.  (Skipped for
  // the metrics snapshots table itself to avoid self-dependency.)
  if (FLAGS_master_enable_metrics_snapshotter && resp->done() &&
      !(table->GetTableType() == TableType::YQL_TABLE_TYPE &&
        table->namespace_id() == kSystemNamespaceId &&
        table->name() == kMetricsSnapshotsTableName)) {
    RETURN_NOT_OK(IsMetricsSnapshotsTableCreated(resp));
  }

  // If this is a colocated table and there is a pending AddTableToTablet task then we are not done.
  if (resp->done() && pb.colocated()) {
    resp->set_done(!table->HasTasks(MonitoredTask::Type::ASYNC_ADD_TABLE_TO_TABLET));
  }

  return Status::OK();
}

// Synchronous helper around IsCreateTableDone; sets *create_in_progress.
// NOTE(review): the 'deadline' parameter is not referenced in this body (it is
// supplied by client::RetryFunc's callback signature) - confirm intended.
Status CatalogManager::IsCreateTableInProgress(const TableId& table_id,
                                               CoarseTimePoint deadline,
                                               bool* create_in_progress) {
  DCHECK_ONLY_NOTNULL(create_in_progress);
  DCHECK(!table_id.empty());

  IsCreateTableDoneRequestPB req;
  IsCreateTableDoneResponsePB resp;
  req.mutable_table()->set_table_id(table_id);
  RETURN_NOT_OK(IsCreateTableDone(&req, &resp));

  if (resp.has_error()) {
    return StatusFromPB(resp.error().status());
  }

  *create_in_progress = !resp.done();
  return Status::OK();
}

// Blocks (retrying via client::RetryFunc) until table creation completes or
// the default admin operation timeout expires.
Status CatalogManager::WaitForCreateTableToFinish(const TableId& table_id) {
  MonoDelta default_admin_operation_timeout(
      MonoDelta::FromSeconds(FLAGS_yb_client_admin_operation_timeout_sec));
  auto deadline = CoarseMonoClock::Now() + default_admin_operation_timeout;

  return client::RetryFunc(
      deadline, "Waiting on Create Table to be completed", "Timed out waiting for Table Creation",
      std::bind(&CatalogManager::IsCreateTableInProgress, this, table_id, _1, _2));
}

// Checks whether the global transaction status table has finished creating.
Status CatalogManager::IsTransactionStatusTableCreated(IsCreateTableDoneResponsePB* resp) {
  IsCreateTableDoneRequestPB req;

  req.mutable_table()->set_table_name(kTransactionsTableName);
  req.mutable_table()->mutable_namespace_()->set_name(kSystemNamespaceName);

  return IsCreateTableDone(&req, resp);
}

// Checks whether the metrics snapshots table has finished creating.
Status CatalogManager::IsMetricsSnapshotsTableCreated(IsCreateTableDoneResponsePB* resp) {
  IsCreateTableDoneRequestPB req;

  req.mutable_table()->set_table_name(kMetricsSnapshotsTableName);
  req.mutable_table()->mutable_namespace_()->set_name(kSystemNamespaceName);
  req.mutable_table()->mutable_namespace_()->set_database_type(YQLDatabase::YQL_DATABASE_CQL);

  return IsCreateTableDone(&req, resp);
}

// Thread-safe wrapper around GenerateIdUnlocked (takes mutex_ shared).
std::string CatalogManager::GenerateId(boost::optional<const SysRowEntry::Type> entity_type) {
  SharedLock lock(mutex_);
  return GenerateIdUnlocked(entity_type);
}

// Generates a new object id, retrying until it is unique within the entity's
// category.  Caller must hold mutex_ (see GenerateId above).
std::string CatalogManager::GenerateIdUnlocked(
    boost::optional<const SysRowEntry::Type> entity_type) {
  while (true) {
    // Generate id and make sure it is unique within its category.
    std::string id = oid_generator_.Next();
    if (!entity_type) {
      return id;
    }
    switch (*entity_type) {
      case SysRowEntry::NAMESPACE:
        if (FindPtrOrNull(namespace_ids_map_, id) == nullptr) return id;
        break;
      case SysRowEntry::TABLE:
        if (FindPtrOrNull(*table_ids_map_, id) == nullptr) return id;
        break;
      case SysRowEntry::TABLET:
        if (FindPtrOrNull(*tablet_map_, id) == nullptr) return id;
        break;
      case SysRowEntry::UDTYPE:
        if (FindPtrOrNull(udtype_ids_map_, id) == nullptr) return id;
        break;
      case SysRowEntry::SNAPSHOT:
        // Snapshots have no uniqueness map here; accept the generated id.
        return id;
      case SysRowEntry::CDC_STREAM:
        if (!CDCStreamExistsUnlocked(id)) return id;
        break;
      // The remaining entity types are not expected to request ids; flag in
      // debug builds but still return a usable id in release.
      case SysRowEntry::CLUSTER_CONFIG: FALLTHROUGH_INTENDED;
      case SysRowEntry::ROLE: FALLTHROUGH_INTENDED;
      case SysRowEntry::REDIS_CONFIG: FALLTHROUGH_INTENDED;
      case SysRowEntry::UNIVERSE_REPLICATION: FALLTHROUGH_INTENDED;
      case SysRowEntry::SYS_CONFIG: FALLTHROUGH_INTENDED;
      case SysRowEntry::SNAPSHOT_SCHEDULE: FALLTHROUGH_INTENDED;
      case SysRowEntry::DDL_LOG_ENTRY: FALLTHROUGH_INTENDED;
      case SysRowEntry::UNKNOWN:
        LOG(DFATAL) << "Invalid id type: " << *entity_type;
        return id;
    }
  }
}

// Builds a new TableInfo in state PREPARING from a CreateTable request.  The
// metadata mutation is started here and must be committed (or aborted) by the
// caller.  Index info is populated from either the new index_info field or
// the deprecated indexed_table_id / is_local_index / is_unique_index fields.
scoped_refptr<TableInfo> CatalogManager::CreateTableInfo(const CreateTableRequestPB& req,
                                                         const Schema& schema,
                                                         const PartitionSchema& partition_schema,
                                                         const NamespaceId& namespace_id,
                                                         const NamespaceName& namespace_name,
                                                         IndexInfoPB* index_info) {
  DCHECK(schema.has_column_ids());
  // Honor a client-supplied table id (e.g. YSQL OIDs); otherwise generate one.
  TableId table_id = !req.table_id().empty() ?
      req.table_id() : GenerateIdUnlocked(SysRowEntry::TABLE);
  scoped_refptr<TableInfo> table = NewTableInfo(table_id);
  if (req.has_tablespace_id()) {
    table->SetTablespaceIdForTableCreation(req.tablespace_id());
  }
  table->mutable_metadata()->StartMutation();
  SysTablesEntryPB *metadata = &table->mutable_metadata()->mutable_dirty()->pb;
  metadata->set_state(SysTablesEntryPB::PREPARING);
  metadata->set_name(req.name());
  metadata->set_table_type(req.table_type());
  metadata->set_namespace_id(namespace_id);
  metadata->set_namespace_name(namespace_name);
  metadata->set_version(0);
  metadata->set_next_column_id(ColumnId(schema.max_col_id() + 1));
  if (req.has_replication_info()) {
    metadata->mutable_replication_info()->CopyFrom(req.replication_info());
  }
  // Use the Schema object passed in, since it has the column IDs already assigned,
  // whereas the user request PB does not.
  SchemaToPB(schema, metadata->mutable_schema());
  partition_schema.ToPB(metadata->mutable_partition_schema());
  // For index table, set index details (indexed table id and whether the index is local).
  if (req.has_index_info()) {
    metadata->mutable_index_info()->CopyFrom(req.index_info());

    // Set the deprecated fields also for compatibility reasons.
    metadata->set_indexed_table_id(req.index_info().indexed_table_id());
    metadata->set_is_local_index(req.index_info().is_local());
    metadata->set_is_unique_index(req.index_info().is_unique());

    // Setup index info.
    if (index_info != nullptr) {
      index_info->set_table_id(table->id());
      metadata->mutable_index_info()->CopyFrom(*index_info);
    }
  } else if (req.has_indexed_table_id()) {
    // Read data from the deprecated field and update the new fields.
    metadata->mutable_index_info()->set_indexed_table_id(req.indexed_table_id());
    metadata->mutable_index_info()->set_is_local(req.is_local_index());
    metadata->mutable_index_info()->set_is_unique(req.is_unique_index());

    // Set the deprecated fields also for compatibility reasons.
    metadata->set_indexed_table_id(req.indexed_table_id());
    metadata->set_is_local_index(req.is_local_index());
    metadata->set_is_unique_index(req.is_unique_index());

    // Setup index info.
    if (index_info != nullptr) {
      index_info->set_table_id(table->id());
      metadata->mutable_index_info()->CopyFrom(*index_info);
    }
  }

  if (req.is_pg_shared_table()) {
    metadata->set_is_pg_shared_table(true);
  }

  return table;
}

// Builds a new TabletInfo in state PREPARING for the given partition of the
// given table.  Ownership of the raw pointer passes to the caller (it is later
// registered in the tablet map, see CreateTabletsFromTable).
TabletInfo* CatalogManager::CreateTabletInfo(TableInfo* table,
                                             const PartitionPB& partition) {
  TabletInfo* tablet = new TabletInfo(table, GenerateIdUnlocked(SysRowEntry::TABLET));
  VLOG_WITH_PREFIX_AND_FUNC(2)
      << "Table: " << table->ToString() << ", tablet: " << tablet->ToString();

  tablet->mutable_metadata()->StartMutation();
  SysTabletsEntryPB *metadata = &tablet->mutable_metadata()->mutable_dirty()->pb;
  metadata->set_state(SysTabletsEntryPB::PREPARING);
  metadata->mutable_partition()->CopyFrom(partition);
  metadata->set_table_id(table->id());
  // This is important: we are setting the first table id in the table_ids list
  // to be the id of the original table that creates the tablet.
  metadata->add_table_ids(table->id());
  return tablet;
}

// Removes the given table ids from a tablet's table_ids list and persists the
// updated tablet metadata to the sys catalog before committing in-memory.
Status CatalogManager::RemoveTableIdsFromTabletInfo(
    TabletInfoPtr tablet_info, unordered_set<TableId> tables_to_remove) {
  auto tablet_lock = tablet_info->LockForWrite();

  google::protobuf::RepeatedPtrField<std::string> new_table_ids;
  for (const auto& table_id : tablet_lock->pb.table_ids()) {
    if (tables_to_remove.find(table_id) == tables_to_remove.end()) {
      // NOTE(review): std::move on a const reference degrades to a copy
      // (clang-tidy performance-move-const-arg) - behavior is correct, the
      // move is just a no-op here.
      *new_table_ids.Add() = std::move(table_id);
    }
  }
  tablet_lock.mutable_data()->pb.mutable_table_ids()->Swap(&new_table_ids);

  RETURN_NOT_OK(sys_catalog_->Upsert(leader_ready_term(), tablet_info));
  tablet_lock.Commit();
  return Status::OK();
}

// Thread-safe table lookup by TableIdentifierPB (id or namespace+name).
Result<scoped_refptr<TableInfo>> CatalogManager::FindTable(
    const TableIdentifierPB& table_identifier) const {
  SharedLock lock(mutex_);
  return FindTableUnlocked(table_identifier);
}

// Table lookup by id or by namespace+name; caller must hold mutex_.
// Name lookup is rejected for YSQL databases (see comment below).
Result<scoped_refptr<TableInfo>> CatalogManager::FindTableUnlocked(
    const TableIdentifierPB& table_identifier) const {
  if (table_identifier.has_table_id()) {
    return FindTableByIdUnlocked(table_identifier.table_id());
  }

  if (table_identifier.has_table_name()) {
    auto namespace_info = VERIFY_RESULT(FindNamespaceUnlocked(table_identifier.namespace_()));

    // We can't lookup YSQL table by name because Postgres concept of "schemas"
    // introduces ambiguity.
    if (namespace_info->database_type() == YQL_DATABASE_PGSQL) {
      return STATUS(InvalidArgument, "Cannot lookup YSQL table by name");
    }

    auto it = table_names_map_.find({namespace_info->id(), table_identifier.table_name()});
    if (it == table_names_map_.end()) {
      return STATUS_EC_FORMAT(
          NotFound, MasterError(MasterErrorPB::OBJECT_NOT_FOUND),
          "Table $0.$1 not found", namespace_info->name(), table_identifier.table_name());
    }
    return it->second;
  }

  return STATUS(InvalidArgument, "Neither table id or table name are specified",
                table_identifier.ShortDebugString());
}

// Thread-safe table lookup by id.
Result<scoped_refptr<TableInfo>> CatalogManager::FindTableById(
    const TableId& table_id) const {
  SharedLock lock(mutex_);
  return FindTableByIdUnlocked(table_id);
}

// Table lookup by id; caller must hold mutex_.
Result<scoped_refptr<TableInfo>> CatalogManager::FindTableByIdUnlocked(
    const TableId& table_id) const {
  auto it = table_ids_map_->find(table_id);
  if (it == table_ids_map_->end()) {
    return STATUS_EC_FORMAT(
        NotFound, MasterError(MasterErrorPB::OBJECT_NOT_FOUND),
        "Table with identifier $0 not found", table_id);
  }
  return it->second;
}

// Thread-safe namespace lookup by id.
Result<scoped_refptr<NamespaceInfo>> CatalogManager::FindNamespaceById(
    const NamespaceId& id) const {
  SharedLock lock(mutex_);
  return FindNamespaceByIdUnlocked(id);
}

// Namespace lookup by id; caller must hold mutex_.
Result<scoped_refptr<NamespaceInfo>> CatalogManager::FindNamespaceByIdUnlocked(
    const NamespaceId& id) const {
  auto it = namespace_ids_map_.find(id);
  if (it == namespace_ids_map_.end()) {
    VLOG_WITH_FUNC(4) << "Not found: " << id << "\n" << GetStackTrace();
    return STATUS(NotFound, "Keyspace identifier not found", id,
                  MasterError(MasterErrorPB::NAMESPACE_NOT_FOUND));
  }
  return it->second;
}

// Namespace lookup by id or by (database type, name); caller must hold mutex_.
Result<scoped_refptr<NamespaceInfo>> CatalogManager::FindNamespaceUnlocked(
    const NamespaceIdentifierPB& ns_identifier) const {
  if (ns_identifier.has_id()) {
    return FindNamespaceByIdUnlocked(ns_identifier.id());
  }

  if (ns_identifier.has_name()) {
    auto db = GetDatabaseType(ns_identifier);
    auto it = namespace_names_mapper_[db].find(ns_identifier.name());
    if (it == namespace_names_mapper_[db].end()) {
      return STATUS(NotFound, "Keyspace name not found", ns_identifier.name(),
                    MasterError(MasterErrorPB::NAMESPACE_NOT_FOUND));
    }
    return it->second;
  }

  // Neither id nor name: programming error on the caller's side.
  LOG(DFATAL) << __func__ << ": " << ns_identifier.ShortDebugString() << ", \n"
              << GetStackTrace();
  return STATUS(NotFound, "Neither keyspace id nor keyspace name is specified",
                ns_identifier.ShortDebugString(),
                MasterError(MasterErrorPB::NAMESPACE_NOT_FOUND));
}

// Thread-safe namespace lookup by identifier.
Result<scoped_refptr<NamespaceInfo>> CatalogManager::FindNamespace(
    const NamespaceIdentifierPB& ns_identifier) const {
  SharedLock lock(mutex_);
  return FindNamespaceUnlocked(ns_identifier);
}

// Resolves a table identifier and describes it (see overload below).
Result<TableDescription> CatalogManager::DescribeTable(
    const TableIdentifierPB& table_identifier, bool succeed_if_create_in_progress) {
  TRACE("Looking up table");
  return DescribeTable(VERIFY_RESULT(FindTable(table_identifier)),
                       succeed_if_create_in_progress);
}

// Collects the table's tablets and namespace into a TableDescription.  Fails
// with TABLE_CREATION_IS_IN_PROGRESS unless the caller opted to accept
// in-progress tables.
Result<TableDescription> CatalogManager::DescribeTable(
    const TableInfoPtr& table_info, bool succeed_if_create_in_progress) {
  TableDescription result;
  result.table_info = table_info;
  NamespaceId namespace_id;
  {
    TRACE("Locking table");
    auto l = table_info->LockForRead();

    if (!succeed_if_create_in_progress && table_info->IsCreateInProgress()) {
      return STATUS(IllegalState, "Table creation is in progress", table_info->ToString(),
                    MasterError(MasterErrorPB::TABLE_CREATION_IS_IN_PROGRESS));
    }

    table_info->GetAllTablets(&result.tablet_infos);

    namespace_id = table_info->namespace_id();
  }

  TRACE("Looking up namespace");
  result.namespace_info = VERIFY_RESULT(FindNamespaceById(namespace_id));

  return result;
}

// Resolves the Postgres schema name of a YSQL table by reading pg_class
// (relnamespace) and pg_namespace (nspname) from the sys catalog.
Result<string> CatalogManager::GetPgSchemaName(const TableInfoPtr& table_info) {
  RSTATUS_DCHECK_EQ(table_info->GetTableType(), PGSQL_TABLE_TYPE, InternalError,
      Format("Expected YSQL table, got: $0", table_info->GetTableType()));

  const uint32_t database_oid = VERIFY_RESULT(GetPgsqlDatabaseOid(table_info->namespace_id()));
  const uint32_t table_oid =
      VERIFY_RESULT(GetPgsqlTableOid(table_info->id()));
  const uint32_t relnamespace_oid = VERIFY_RESULT(
      sys_catalog_->ReadPgClassRelnamespace(database_oid, table_oid));
  return sys_catalog_->ReadPgNamespaceNspname(database_oid, relnamespace_oid);
}

// Truncate a Table.  RPC entry point: truncates each table id in the request
// via the per-table overload below.
Status CatalogManager::TruncateTable(const TruncateTableRequestPB* req,
                                     TruncateTableResponsePB* resp,
                                     rpc::RpcContext* rpc) {
  LOG(INFO) << "Servicing TruncateTable request from " << RequestorString(rpc)
            << ": " << req->ShortDebugString();

  for (int i = 0; i < req->table_ids_size(); i++) {
    RETURN_NOT_OK(TruncateTable(req->table_ids(i), resp, rpc));
  }

  return Status::OK();
}

// Truncates a single table by id: fans out Truncate() to every tablet, then
// recursively truncates YCQL indexes listed in the table metadata (YSQL
// indexes are truncated separately by the PG layer - see note below).
Status CatalogManager::TruncateTable(const TableId& table_id,
                                     TruncateTableResponsePB* resp,
                                     rpc::RpcContext* rpc) {
  // Lookup the table and verify if it exists.
  TRACE(Substitute("Looking up object by id $0", table_id));
  scoped_refptr<TableInfo> table;
  {
    SharedLock lock(mutex_);
    table = FindPtrOrNull(*table_ids_map_, table_id);
    if (table == nullptr) {
      Status s = STATUS_SUBSTITUTE(NotFound, "The object with id $0 does not exist", table_id);
      return SetupError(resp->mutable_error(), MasterErrorPB::OBJECT_NOT_FOUND, s);
    }
  }

  TRACE(Substitute("Locking object with id $0", table_id));
  auto l = table->LockForRead();
  RETURN_NOT_OK(CheckIfTableDeletedOrNotVisibleToClient(l, resp));

  // Truncate on a colocated table should not hit master because it should be handled by a write
  // DML that creates a table-level tombstone.
  LOG_IF(WARNING, IsColocatedUserTable(*table)) << "cannot truncate a colocated table on master";

  // Send a Truncate() request to each tablet in the table.
  SendTruncateTableRequest(table);

  LOG(INFO) << "Successfully initiated TRUNCATE for " << table->ToString() << " per request from "
            << RequestorString(rpc);
  background_tasks_->Wake();

  // Truncate indexes also.
  // Note: PG table does not have references to indexes in the base table, so associated indexes
  // must be truncated from the PG code separately.
  const bool is_index = IsIndex(l->pb);
  DCHECK(!is_index || l->pb.indexes().empty()) << "indexes should be empty for index table";
  for (const auto& index_info : l->pb.indexes()) {
    RETURN_NOT_OK(TruncateTable(index_info.table_id(), resp, rpc));
  }

  return Status::OK();
}

// Fans a truncate request out to every tablet of the table.
void CatalogManager::SendTruncateTableRequest(const scoped_refptr<TableInfo>& table) {
  vector<scoped_refptr<TabletInfo>> tablets;
  table->GetAllTablets(&tablets);
  for (const scoped_refptr<TabletInfo>& tablet : tablets) {
    SendTruncateTabletRequest(tablet);
  }
}

// Schedules an AsyncTruncate task for one tablet; a scheduling failure is
// logged but not propagated.
void CatalogManager::SendTruncateTabletRequest(const scoped_refptr<TabletInfo>& tablet) {
  LOG_WITH_PREFIX(INFO) << "Truncating tablet " << tablet->id();
  auto call = std::make_shared<AsyncTruncate>(master_, AsyncTaskPool(), tablet);
  tablet->table()->AddTask(call);
  WARN_NOT_OK(
      ScheduleTask(call),
      Substitute("Failed to send truncate request for tablet $0", tablet->id()));
}

// Reports whether a previously issued truncate has finished, i.e. whether the
// table still has outstanding ASYNC_TRUNCATE_TABLET tasks.
Status CatalogManager::IsTruncateTableDone(const IsTruncateTableDoneRequestPB* req,
                                           IsTruncateTableDoneResponsePB* resp) {
  LOG(INFO) << "Servicing IsTruncateTableDone request for table id " << req->table_id();

  // Lookup the truncated table.
  TRACE("Looking up table $0", req->table_id());
  scoped_refptr<TableInfo> table;
  {
    SharedLock lock(mutex_);
    table = FindPtrOrNull(*table_ids_map_, req->table_id());
  }

  if (table == nullptr) {
    Status s = STATUS(NotFound, "The object does not exist: table with id", req->table_id());
    return SetupError(resp->mutable_error(), MasterErrorPB::OBJECT_NOT_FOUND, s);
  }

  TRACE("Locking table");
  RETURN_NOT_OK(CheckIfTableDeletedOrNotVisibleToClient(table->LockForRead(), resp));

  resp->set_done(!table->HasTasks(MonitoredTask::Type::ASYNC_TRUNCATE_TABLET));
  return Status::OK();
}

// Note: only used by YSQL as of 2020-10-29.
// Kicks off data backfill for a YSQL index once it has reached the
// WRITE_AND_DELETE permission.  (YCQL uses LaunchBackfillIndexForTable below.)
Status CatalogManager::BackfillIndex(
    const BackfillIndexRequestPB* req,
    BackfillIndexResponsePB* resp,
    rpc::RpcContext* rpc) {
  const TableIdentifierPB& index_table_identifier = req->index_identifier();

  scoped_refptr<TableInfo> index_table = VERIFY_RESULT(FindTable(index_table_identifier));

  if (index_table->GetTableType() != PGSQL_TABLE_TYPE) {
    // This request is only supported for YSQL for now.  YCQL has its own mechanism.
    return STATUS(
        InvalidArgument,
        "Unexpected non-YSQL table",
        index_table_identifier.ShortDebugString());
  }

  // Collect indexed_table.
  scoped_refptr<TableInfo> indexed_table;
  {
    auto l = index_table->LockForRead();
    TableId indexed_table_id = GetIndexedTableId(l->pb);
    resp->mutable_table_identifier()->set_table_id(indexed_table_id);
    indexed_table = GetTableInfo(indexed_table_id);
  }

  if (indexed_table == nullptr) {
    return STATUS(InvalidArgument, "Empty indexed table",
                  index_table_identifier.ShortDebugString());
  }

  // TODO(jason): when ready to use INDEX_PERM_DO_BACKFILL for resuming backfill across master
  // leader changes, replace the following (issue #6218).

  // Collect index_info_pb.
  IndexInfoPB index_info_pb;
  indexed_table->GetIndexInfo(index_table->id()).ToPB(&index_info_pb);
  if (index_info_pb.index_permissions() != INDEX_PERM_WRITE_AND_DELETE) {
    return SetupError(
        resp->mutable_error(),
        MasterErrorPB::INVALID_SCHEMA,
        STATUS_FORMAT(
            InvalidArgument,
            "Expected WRITE_AND_DELETE perm, got $0",
            IndexPermissions_Name(index_info_pb.index_permissions())));
  }

  return MultiStageAlterTable::StartBackfillingData(
      this, indexed_table, {index_info_pb}, boost::none);
}

// Returns the backfill jobs currently recorded on a table's metadata.
Status CatalogManager::GetBackfillJobs(
    const GetBackfillJobsRequestPB* req,
    GetBackfillJobsResponsePB* resp,
    rpc::RpcContext* rpc) {
  TableIdentifierPB table_id = req->table_identifier();

  scoped_refptr<TableInfo> indexed_table = VERIFY_RESULT(FindTable(table_id));
  if (indexed_table == nullptr) {
    Status s = STATUS(NotFound, "Requested table $0 does not exist", table_id.ShortDebugString());
    return SetupError(resp->mutable_error(), MasterErrorPB::OBJECT_NOT_FOUND, s);
  }

  {
    auto l = indexed_table->LockForRead();
    resp->mutable_backfill_jobs()->CopyFrom(l->pb.backfill_jobs());
  }
  return Status::OK();
}

// Advances the multi-stage index permission state machine for a YCQL indexed
// table, which eventually triggers backfill.  The table must be RUNNING.
Status CatalogManager::LaunchBackfillIndexForTable(
    const LaunchBackfillIndexForTableRequestPB* req,
    LaunchBackfillIndexForTableResponsePB* resp,
    rpc::RpcContext* rpc) {
  const TableIdentifierPB& table_id = req->table_identifier();

  scoped_refptr<TableInfo> indexed_table = VERIFY_RESULT(FindTable(table_id));
  if (indexed_table == nullptr) {
    Status s = STATUS(NotFound, "Requested table $0 does not exist", table_id.ShortDebugString());
    return SetupError(resp->mutable_error(), MasterErrorPB::OBJECT_NOT_FOUND, s);
  }
  if (indexed_table->GetTableType() != YQL_TABLE_TYPE) {
    // This request is only supported for YCQL for now.  YSQL has its own mechanism.
    return STATUS(InvalidArgument, "Unexpected non-YCQL table $0", table_id.ShortDebugString());
  }

  uint32_t current_version;
  {
    auto l = indexed_table->LockForRead();
    if (l->pb.state() != SysTablesEntryPB::RUNNING) {
      Status s = STATUS(TryAgain,
                        "The table is in state $0. An alter may already be in progress.",
                        SysTablesEntryPB_State_Name(l->pb.state()));
      VLOG(2) << "Table " << indexed_table->ToString() << " is not running returning " << s;
      return SetupError(resp->mutable_error(), MasterErrorPB::INTERNAL_ERROR, s);
    }
    current_version = l->pb.version();
  }

  auto s = MultiStageAlterTable::LaunchNextTableInfoVersionIfNecessary(
      this, indexed_table, current_version, /* respect deferrals for backfill */ false);
  if (!s.ok()) {
    VLOG(3) << __func__ << " Done failed " << s;
    return SetupError(resp->mutable_error(), MasterErrorPB::UNKNOWN_ERROR, s);
  }

  return Status::OK();
}

// Marks an index for deletion on its indexed table: either via the multi-stage
// permission downgrade (WRITE_AND_DELETE_WHILE_REMOVING) or by removing the
// index info immediately, then sends an alter to propagate the change.
Status CatalogManager::MarkIndexInfoFromTableForDeletion(
    const TableId& indexed_table_id, const TableId& index_table_id, bool multi_stage,
    DeleteTableResponsePB* resp) {
  // Lookup the indexed table and verify if it exists.
scoped_refptr<TableInfo> indexed_table = GetTableInfo(indexed_table_id); if (indexed_table == nullptr) { LOG(WARNING) << "Indexed table " << indexed_table_id << " for index " << index_table_id << " not found"; return Status::OK(); } if (resp) { auto ns_info = VERIFY_RESULT(master_->catalog_manager()->FindNamespaceById( indexed_table->namespace_id())); auto* resp_indexed_table = resp->mutable_indexed_table(); resp_indexed_table->mutable_namespace_()->set_name(ns_info->name()); resp_indexed_table->set_table_name(indexed_table->name()); resp_indexed_table->set_table_id(indexed_table_id); } if (multi_stage) { RETURN_NOT_OK(MultiStageAlterTable::UpdateIndexPermission( this, indexed_table, {{index_table_id, IndexPermissions::INDEX_PERM_WRITE_AND_DELETE_WHILE_REMOVING}})); } else { RETURN_NOT_OK(DeleteIndexInfoFromTable(indexed_table_id, index_table_id)); } // Actual Deletion of the index info will happen asynchronously after all the // tablets move to the new IndexPermission of DELETE_ONLY_WHILE_REMOVING. RETURN_NOT_OK(SendAlterTableRequest(indexed_table)); return Status::OK(); } Status CatalogManager::DeleteIndexInfoFromTable( const TableId& indexed_table_id, const TableId& index_table_id) { scoped_refptr<TableInfo> indexed_table = GetTableInfo(indexed_table_id); if (indexed_table == nullptr) { LOG(WARNING) << "Indexed table " << indexed_table_id << " for index " << index_table_id << " not found"; return Status::OK(); } TRACE("Locking indexed table"); auto l = indexed_table->LockForWrite(); auto &indexed_table_data = *l.mutable_data(); // Heed issue #6233. 
  // Preserve the currently-applied schema before mutating, so clients keep seeing a
  // consistent schema while the alter propagates (see issue #6233).
  if (!l->pb.has_fully_applied_schema()) {
    MultiStageAlterTable::CopySchemaDetailsToFullyApplied(&indexed_table_data.pb);
  }
  auto *indexes = indexed_table_data.pb.mutable_indexes();
  for (int i = 0; i < indexes->size(); i++) {
    if (indexes->Get(i).table_id() == index_table_id) {
      indexes->DeleteSubrange(i, 1);

      indexed_table_data.pb.set_version(indexed_table_data.pb.version() + 1);
      // TODO(Amit) : Is this compatible with the previous version?
      indexed_table_data.pb.set_updates_only_index_permissions(false);
      indexed_table_data.set_state(
          SysTablesEntryPB::ALTERING,
          Format("Delete index info version=$0 ts=$1",
                 indexed_table_data.pb.version(), LocalTimeAsString()));

      // Update sys-catalog with the deleted indexed table info.
      TRACE("Updating indexed table metadata on disk");
      RETURN_NOT_OK(sys_catalog_->Upsert(leader_ready_term(), indexed_table));

      // Update the in-memory state.
      TRACE("Committing in-memory state");
      l.Commit();
      return Status::OK();
    }
  }

  // Index was not present in the indexed table's metadata; treat as already removed.
  LOG(WARNING) << "Index " << index_table_id << " not found in indexed table " << indexed_table_id;
  return Status::OK();
}

// RPC entry point for dropping a table or an index. For a (non-YSQL) index with
// index backfill enabled this only starts the multi-stage permission walk-down;
// everything else goes straight to DeleteTableInternal().
Status CatalogManager::DeleteTable(
    const DeleteTableRequestPB* req, DeleteTableResponsePB* resp, rpc::RpcContext* rpc) {
  LOG(INFO) << "Servicing DeleteTable request from " << RequestorString(rpc) << ": "
            << req->ShortDebugString();

  if (req->is_index_table()) {
    TRACE("Looking up index");
    scoped_refptr<TableInfo> table = VERIFY_RESULT(FindTable(req->table()));
    TableId table_id = table->id();
    resp->set_table_id(table_id);
    TableId indexed_table_id;
    {
      auto l = table->LockForRead();
      indexed_table_id = GetIndexedTableId(l->pb);
    }
    scoped_refptr<TableInfo> indexed_table = GetTableInfo(indexed_table_id);
    const bool is_pg_table = indexed_table != nullptr &&
                             indexed_table->GetTableType() == PGSQL_TABLE_TYPE;
    bool is_transactional;
    {
      Schema index_schema;
      RETURN_NOT_OK(table->GetSchema(&index_schema));
      is_transactional = index_schema.table_properties().is_transactional();
    }
    const bool index_backfill_enabled =
        IsIndexBackfillEnabled(table->GetTableType(), is_transactional);
    if (!is_pg_table && index_backfill_enabled) {
      // Multi-stage path: demote index permissions first; actual deletion follows
      // asynchronously once all tablets acknowledge the permission change.
      return MarkIndexInfoFromTableForDeletion(
          indexed_table_id, table_id, /* multi_stage */ true, resp);
    }
  }

  return DeleteTableInternal(req, resp, rpc);
}

// Delete a Table
//  - Update the table state to "DELETING".
//  - Issue DeleteTablet tasks to all said tablets.
//  - Update all the underlying tablet states as "DELETED".
//
// This order of events can help us guarantee that:
//  - If a table is DELETING/DELETED, we do not add further tasks to it.
//  - A DeleteTable is done when a table is either DELETING or DELETED and has no running tasks.
//  - If a table is DELETING and it has no tasks on it, then it is safe to mark DELETED.
//
// We are lazy about deletions.
//
// IMPORTANT: If modifying, consider updating DeleteYsqlDBTables(), the bulk deletion API.
Status CatalogManager::DeleteTableInternal(
    const DeleteTableRequestPB* req, DeleteTableResponsePB* resp, rpc::RpcContext* rpc) {
  auto schedules_to_tables_map = VERIFY_RESULT(
      MakeSnapshotSchedulesToObjectIdsMap(SysRowEntry::TABLE));

  vector<DeletingTableData> tables;
  RETURN_NOT_OK(DeleteTableInMemory(req->table(), req->is_index_table(),
                                    true /* update_indexed_table */, schedules_to_tables_map,
                                    &tables, resp, rpc));

  // Delete any CDC streams that are set up on this table.
  TRACE("Deleting CDC streams on table");
  // table_id for the requested table will be added to the end of the response.
  RSTATUS_DCHECK_GE(resp->deleted_table_ids_size(), 1, IllegalState,
      "DeleteTableInMemory expected to add the index id to resp");
  RETURN_NOT_OK(
      DeleteCDCStreamsForTable(resp->deleted_table_ids(resp->deleted_table_ids_size() - 1)));

  // Update the in-memory state.
  TRACE("Committing in-memory state");
  // Commit the DELETING/HIDING state for every table gathered by DeleteTableInMemory,
  // remembering which of them are system tables (they need sys-catalog RPCs below).
  unordered_set<TableId> sys_table_ids;
  for (auto& table : tables) {
    if (IsSystemTable(*table.info)) {
      sys_table_ids.insert(table.info->id());
    }
    table.write_lock.Commit();
  }

  // Test-only fault injection point.
  if (PREDICT_FALSE(FLAGS_catalog_manager_inject_latency_in_delete_table_ms > 0)) {
    LOG(INFO) << "Sleeping in CatalogManager::DeleteTable for "
              << FLAGS_catalog_manager_inject_latency_in_delete_table_ms << " ms";
    SleepFor(MonoDelta::FromMilliseconds(FLAGS_catalog_manager_inject_latency_in_delete_table_ms));
  }

  for (const auto& table : tables) {
    LOG(INFO) << "Deleting table: " << table.info->name() << ", retained by: "
              << AsString(table.retained_by_snapshot_schedules, &TryFullyDecodeUuid);

    // Send a DeleteTablet() request to each tablet replica in the table.
    RETURN_NOT_OK(DeleteTabletsAndSendRequests(table.info, table.retained_by_snapshot_schedules));
    // Send a RemoveTableFromTablet() request to each colocated parent tablet replica in the table.
    // TODO(pitr) handle YSQL colocated tables.
    if (IsColocatedUserTable(*table.info)) {
      auto call = std::make_shared<AsyncRemoveTableFromTablet>(
          master_, AsyncTaskPool(), table.info->GetColocatedTablet(), table.info);
      table.info->AddTask(call);
      WARN_NOT_OK(ScheduleTask(call), "Failed to send RemoveTableFromTablet request");
    }
  }

  // If there are any permissions granted on this table find them and delete them. This is necessary
  // because we keep track of the permissions based on the canonical resource name which is a
  // combination of the keyspace and table names, so if another table with the same name is created
  // (in the same keyspace where the previous one existed), and the permissions were not deleted at
  // the time of the previous table deletion, then the permissions that existed for the previous
  // table will automatically be granted to the new table even though this wasn't the intention.
  string canonical_resource = get_canonical_table(req->table().namespace_().name(),
                                                  req->table().table_name());
  RETURN_NOT_OK(permissions_manager_->RemoveAllPermissionsForResource(canonical_resource, resp));

  // Remove the system tables from system catalog.
  if (!sys_table_ids.empty()) {
    // We do not expect system tables deletion during initial snapshot forming.
    DCHECK(!initial_snapshot_writer_);

    TRACE("Sending system table delete RPCs");
    for (auto& table_id : sys_table_ids) {
      // "sys_catalog_->DeleteYsqlSystemTable(table_id)" won't work here
      // as it only acts on the leader.
      tserver::ChangeMetadataRequestPB change_req;
      change_req.set_tablet_id(kSysCatalogTabletId);
      change_req.set_remove_table_id(table_id);
      RETURN_NOT_OK(tablet::SyncReplicateChangeMetadataOperation(
          &change_req, sys_catalog_->tablet_peer().get(), leader_ready_term()));
    }
  } else {
    TRACE("No system tables to delete");
  }

  LOG(INFO) << "Successfully initiated deletion of "
            << (req->is_index_table() ? "index" : "table") << " with "
            << req->table().DebugString() << " per request from " << RequestorString(rpc);
  // Asynchronously cleans up the final memory traces of the deleted database.
  background_tasks_->Wake();
  return Status::OK();
}

// Marks one table (and, recursively, its indexes) as DELETING/HIDING in memory and
// on disk, appending a DeletingTableData (info + still-uncommitted write lock) for
// each affected table to 'tables'. The caller commits the locks afterwards.
Status CatalogManager::DeleteTableInMemory(
    const TableIdentifierPB& table_identifier,
    const bool is_index_table,
    const bool update_indexed_table,
    const SnapshotSchedulesToObjectIdsMap& schedules_to_tables_map,
    vector<DeletingTableData>* tables,
    DeleteTableResponsePB* resp,
    rpc::RpcContext* rpc) {
  // TODO(NIC): How to handle a DeleteTable request when the namespace is being deleted?
  const char* const object_type = is_index_table ? "index" : "table";
  // A cascade delete is an index being deleted as part of its indexed table's deletion;
  // in that mode a missing/already-deleted index is not an error.
  const bool cascade_delete_index = is_index_table && !update_indexed_table;

  VLOG_WITH_PREFIX_AND_FUNC(1) << YB_STRUCT_TO_STRING(
      table_identifier, is_index_table, update_indexed_table) << "\n" << GetStackTrace();

  // Lookup the table and verify if it exists.
  TRACE(Substitute("Looking up $0", object_type));
  auto table_result = FindTable(table_identifier);
  if (!VERIFY_RESULT(DoesTableExist(table_result))) {
    if (cascade_delete_index) {
      // Missing index during cascade delete is benign - it may already be gone.
      LOG(WARNING) << "Index " << table_identifier.DebugString() << " not found";
      return Status::OK();
    } else {
      return table_result.status();
    }
  }
  auto table = std::move(*table_result);

  TRACE(Substitute("Locking $0", object_type));
  // The write lock is held (uncommitted) inside 'data' until the caller commits it.
  auto data = DeletingTableData {
    .info = table,
    .write_lock = table->LockForWrite(),
  };
  auto& l = data.write_lock;
  // table_id for the requested table will be added to the end of the response.
  *resp->add_deleted_table_ids() = table->id();

  // Reject a mismatch between what the caller asked for (index vs table) and
  // what the metadata says this object is.
  if (is_index_table == IsTable(l->pb)) {
    Status s = STATUS(NotFound, "The object does not exist");
    return SetupError(resp->mutable_error(), MasterErrorPB::OBJECT_NOT_FOUND, s);
  }

  // If any snapshot schedule retains this table, we only hide it instead of deleting.
  FillRetainedBySnapshotSchedules(
      schedules_to_tables_map, table->id(), &data.retained_by_snapshot_schedules);
  bool hide_only = !data.retained_by_snapshot_schedules.empty();

  if (l->started_deleting() || (hide_only && l->started_hiding())) {
    if (cascade_delete_index) {
      LOG(WARNING) << "Index " << table_identifier.ShortDebugString() << " was "
                   << (l->started_deleting() ? "deleted" : "hidden");
      return Status::OK();
    } else {
      Status s = STATUS(NotFound, "The object was deleted", l->pb.state_msg());
      return SetupError(resp->mutable_error(), MasterErrorPB::OBJECT_NOT_FOUND, s);
    }
  }

  // Remember whether hiding had already started: such tables were already removed
  // from the by-name map and must not be removed again below.
  bool was_hiding = l->started_hiding();

  TRACE("Updating metadata on disk");
  // Update the metadata for the on-disk state.
  if (hide_only) {
    l.mutable_data()->pb.set_hide_state(SysTablesEntryPB::HIDING);
  } else {
    l.mutable_data()->set_state(SysTablesEntryPB::DELETING,
                                 Substitute("Started deleting at $0", LocalTimeAsString()));
  }

  // Build the DDL-log entry; for an index, attribute the drop to the indexed table.
  auto now = master_->clock()->Now();
  DdlLogEntry ddl_log_entry(now, table->id(), l->pb, "Drop");
  if (is_index_table) {
    const auto& indexed_table_id = GetIndexedTableId(l->pb);
    auto indexed_table = FindTableById(indexed_table_id);
    if (indexed_table.ok()) {
      auto lock = (**indexed_table).LockForRead();
      ddl_log_entry = DdlLogEntry(
          now, indexed_table_id, lock->pb, Format("Drop index $0", l->name()));
    }
  }

  // Update sys-catalog with the removed table state.
  Status s = sys_catalog_->Upsert(leader_ready_term(), &ddl_log_entry, table);

  // Test-only crash simulation: pretend success without committing in-memory state.
  if (PREDICT_FALSE(FLAGS_TEST_simulate_crash_after_table_marked_deleting)) {
    return Status::OK();
  }

  if (!s.ok()) {
    // The mutation will be aborted when 'l' exits the scope on early return.
    s = s.CloneAndPrepend("An error occurred while updating sys tables");
    LOG(WARNING) << s;
    return CheckIfNoLongerLeaderAndSetupError(s, resp);
  }

  // Update the internal table maps.
  // Exclude Postgres tables which are not in the name map.
  // Also exclude hidden tables, that were already removed from this map.
  if (l.data().table_type() != PGSQL_TABLE_TYPE && !was_hiding) {
    TRACE("Removing from by-name map");
    LockGuard lock(mutex_);
    if (table_names_map_.erase({l->namespace_id(), l->name()}) != 1) {
      PANIC_RPC(rpc, "Could not remove table from map, name=" + table->ToString());
    }
    // We commit another map to increment its version and reset cache.
    // Since table_name_map_ does not have version.
    table_ids_map_.Commit();
  }

  // For regular (indexed) table, delete all its index tables if any. Else for index table, delete
  // index info from the indexed table.
  if (!is_index_table) {
    TableIdentifierPB index_identifier;
    for (const auto& index : l->pb.indexes()) {
      index_identifier.set_table_id(index.table_id());
      RETURN_NOT_OK(DeleteTableInMemory(index_identifier, true /* is_index_table */,
                                        false /* update_indexed_table */, schedules_to_tables_map,
                                        tables, resp, rpc));
    }
  } else if (update_indexed_table) {
    s = MarkIndexInfoFromTableForDeletion(
        GetIndexedTableId(l->pb), table->id(), /* multi_stage */ false, resp);
    if (!s.ok()) {
      s = s.CloneAndPrepend(Substitute("An error occurred while deleting index info: $0",
                                       s.ToString()));
      LOG(WARNING) << s.ToString();
      return CheckIfNoLongerLeaderAndSetupError(s, resp);
    }
  }

  if (!hide_only) {
    // If table is being hidden we should not abort snapshot related tasks.
    table->AbortTasks();
  }

  // For regular (indexed) table, insert table info and lock in the front of the list. Else for
  // index table, append them to the end. We do so so that we will commit and delete the indexed
  // table first before its indexes.
  tables->insert(is_index_table ? tables->end() : tables->begin(), std::move(data));

  return Status::OK();
}

// Returns a locked write lock on 'table' with its dirty state set to DELETED (or
// HIDDEN), iff the table has finished deleting/hiding all of its tablets and has no
// outstanding tasks; otherwise returns a default (unlocked) WriteLock. The caller is
// responsible for persisting and committing the returned lock.
TableInfo::WriteLock CatalogManager::MaybeTransitionTableToDeleted(const TableInfoPtr& table) {
  if (!table) {
    LOG_WITH_PREFIX(INFO) << "Finished deleting an Orphaned tablet. "
                          << "Table Information is null. Skipping updating its state to DELETED.";
    return TableInfo::WriteLock();
  }
  if (table->HasTasks()) {
    VLOG_WITH_PREFIX_AND_FUNC(2) << table->ToString() << " has tasks";
    return TableInfo::WriteLock();
  }
  bool hide_only;
  {
    auto lock = table->LockForRead();

    // For any table in DELETING state, we will want to mark it as DELETED once all its respective
    // tablets have been successfully removed from tservers.
    // For any hiding table we will want to mark it as HIDDEN once all its respective
    // tablets have been successfully hidden on tservers.
hide_only = !lock->is_deleting(); if (hide_only && !lock->is_hiding()) { return TableInfo::WriteLock(); } } // The current relevant order of operations during a DeleteTable is: // 1) Mark the table as DELETING // 2) Abort the current table tasks // 3) Per tablet, send DeleteTable requests to all TS, then mark that tablet as DELETED // // This creates a race, wherein, after 2, HasTasks can be false, but we still have not // gotten to point 3, which would add further tasks for the deletes. // // However, HasTasks is cheaper than AreAllTabletsDeletedOrHidden... auto all_tablets_done = hide_only ? table->AreAllTabletsHidden() : table->AreAllTabletsDeleted(); VLOG_WITH_PREFIX_AND_FUNC(2) << table->ToString() << " hide only: " << hide_only << ", all tablets done: " << all_tablets_done; if (!all_tablets_done && !IsSystemTable(*table) && !IsColocatedUserTable(*table)) { return TableInfo::WriteLock(); } auto lock = table->LockForWrite(); if (lock->is_hiding()) { LOG(INFO) << "Marking table as HIDDEN: " << table->ToString(); lock.mutable_data()->pb.set_hide_state(SysTablesEntryPB::HIDDEN); return lock; } if (lock->is_deleting()) { // Update the metadata for the on-disk state. LOG(INFO) << "Marking table as DELETED: " << table->ToString(); lock.mutable_data()->set_state(SysTablesEntryPB::DELETED, Substitute("Deleted with tablets at $0", LocalTimeAsString())); return lock; } return TableInfo::WriteLock(); } void CatalogManager::CleanUpDeletedTables() { // TODO(bogdan): Cache tables being deleted to make this iterate only over those? vector<scoped_refptr<TableInfo>> tables_to_delete; // Garbage collecting. // Going through all tables under the global lock, copying them to not hold lock for too long. TableInfoMap copy_of_table_by_id_map; { LockGuard lock(mutex_); copy_of_table_by_id_map = *table_ids_map_; } // Mark the tables as DELETED and remove them from the in-memory maps. 
vector<TableInfo*> tables_to_update_on_disk; vector<TableInfo::WriteLock> table_locks; for (const auto& it : copy_of_table_by_id_map) { const auto& table = it.second; auto lock = MaybeTransitionTableToDeleted(table); if (lock.locked()) { table_locks.push_back(std::move(lock)); tables_to_update_on_disk.push_back(table.get()); } } if (tables_to_update_on_disk.size() > 0) { Status s = sys_catalog_->Upsert(leader_ready_term(), tables_to_update_on_disk);; if (!s.ok()) { LOG(WARNING) << "Error marking tables as DELETED: " << s.ToString(); return; } // Update the table in-memory info as DELETED after we've removed them from the maps. for (auto& lock : table_locks) { lock.Commit(); } // TODO: Check if we want to delete the totally deleted table from the sys_catalog here. // TODO: SysCatalog::DeleteItem() if we've DELETED all user tables in a DELETING namespace. // TODO: Also properly handle namespace_ids_map_.erase(table->namespace_id()) } } Status CatalogManager::IsDeleteTableDone(const IsDeleteTableDoneRequestPB* req, IsDeleteTableDoneResponsePB* resp) { // Lookup the deleted table. TRACE("Looking up table $0", req->table_id()); scoped_refptr<TableInfo> table; { SharedLock lock(mutex_); table = FindPtrOrNull(*table_ids_map_, req->table_id()); } if (table == nullptr) { LOG(INFO) << "Servicing IsDeleteTableDone request for table id " << req->table_id() << ": deleted (not found)"; resp->set_done(true); return Status::OK(); } TRACE("Locking table"); auto l = table->LockForRead(); if (!l->started_deleting() && !l->started_hiding()) { LOG(WARNING) << "Servicing IsDeleteTableDone request for table id " << req->table_id() << ": NOT deleted"; Status s = STATUS(IllegalState, "The object was NOT deleted", l->pb.state_msg()); return SetupError(resp->mutable_error(), MasterErrorPB::OBJECT_NOT_FOUND, s); } // Temporary fix for github issue #5290. // TODO: Wait till deletion completed for tablegroup parent table. 
  // Tablegroup parent tables report done as soon as deletion starts (issue #5290).
  if (IsTablegroupParentTable(*table)) {
    LOG(INFO) << "Servicing IsDeleteTableDone request for tablegroup parent table id "
              << req->table_id() << ": deleting. Skipping wait for DELETED state.";
    resp->set_done(true);
    return Status::OK();
  }

  if (l->is_deleted() || l->is_hidden()) {
    LOG(INFO) << "Servicing IsDeleteTableDone request for table id "
              << req->table_id() << ": totally " << (l->is_hidden() ? "hidden" : "deleted");
    resp->set_done(true);
  } else {
    LOG(INFO) << "Servicing IsDeleteTableDone request for table id " << req->table_id()
              << ((!IsColocatedUserTable(*table)) ? ": deleting tablets" : "");

    // Log which tablet servers still have pending tablet deletions, for debugging.
    std::vector<std::shared_ptr<TSDescriptor>> descs;
    master_->ts_manager()->GetAllDescriptors(&descs);
    for (auto& ts_desc : descs) {
      LOG(INFO) << "Deleting on " << ts_desc->permanent_uuid() << ": "
                << ts_desc->PendingTabletDeleteToString();
    }

    resp->set_done(false);
  }

  return Status::OK();
}

namespace {

// Applies the alter-schema steps from 'req' on top of 'current_pb''s schema,
// producing the resulting schema in 'new_schema' and the next free column id in
// 'next_col_id', and appending one DdlLogEntry per applied step.
// Pure metadata computation: no catalog state is mutated.
CHECKED_STATUS ApplyAlterSteps(server::Clock* clock,
                               const TableId& table_id,
                               const SysTablesEntryPB& current_pb,
                               const AlterTableRequestPB* req,
                               Schema* new_schema,
                               ColumnId* next_col_id,
                               std::vector<DdlLogEntry>* ddl_log_entries) {
  const SchemaPB& current_schema_pb = current_pb.schema();
  Schema cur_schema;
  RETURN_NOT_OK(SchemaFromPB(current_schema_pb, &cur_schema));

  SchemaBuilder builder(cur_schema);
  if (current_pb.has_next_column_id()) {
    builder.set_next_column_id(ColumnId(current_pb.next_column_id()));
  }
  if (current_pb.has_colocated() && current_pb.colocated()) {
    // Colocated tables carry their identity (cotable uuid or pg table oid)
    // inside the schema so they can share a tablet.
    if (current_schema_pb.table_properties().is_ysql_catalog_table()) {
      Uuid cotable_id;
      RETURN_NOT_OK(cotable_id.FromHexString(req->table().table_id()));
      builder.set_cotable_id(cotable_id);
    } else {
      uint32_t pgtable_id = VERIFY_RESULT(GetPgsqlTableOid(req->table().table_id()));
      builder.set_pgtable_id(pgtable_id);
    }
  }

  for (const AlterTableRequestPB::Step& step : req->alter_schema_steps()) {
    auto time = clock->Now();
    switch (step.type()) {
      case AlterTableRequestPB::ADD_COLUMN: {
        if (!step.has_add_column()) {
          return STATUS(InvalidArgument, "ADD_COLUMN missing column info");
        }

        // Verify that encoding is appropriate for the new column's type.
        ColumnSchemaPB new_col_pb = step.add_column().schema();
        if (new_col_pb.has_id()) {
          // Column ids are assigned by the master, never by the client.
          return STATUS_SUBSTITUTE(InvalidArgument,
                                   "column $0: client should not specify column id",
                                   new_col_pb.ShortDebugString());
        }
        ColumnSchema new_col = ColumnSchemaFromPB(new_col_pb);

        RETURN_NOT_OK(builder.AddColumn(new_col, false));
        ddl_log_entries->emplace_back(time, table_id, current_pb,
                                      Format("Add column $0", new_col));
        break;
      }

      case AlterTableRequestPB::DROP_COLUMN: {
        if (!step.has_drop_column()) {
          return STATUS(InvalidArgument, "DROP_COLUMN missing column info");
        }

        if (cur_schema.is_key_column(step.drop_column().name())) {
          return STATUS(InvalidArgument, "cannot remove a key column");
        }

        RETURN_NOT_OK(builder.RemoveColumn(step.drop_column().name()));
        ddl_log_entries->emplace_back(
            time, table_id, current_pb, Format("Drop column $0", step.drop_column().name()));
        break;
      }

      case AlterTableRequestPB::RENAME_COLUMN: {
        if (!step.has_rename_column()) {
          return STATUS(InvalidArgument, "RENAME_COLUMN missing column info");
        }

        RETURN_NOT_OK(builder.RenameColumn(
            step.rename_column().old_name(),
            step.rename_column().new_name()));
        ddl_log_entries->emplace_back(
            time, table_id, current_pb,
            Format("Rename column $0 => $1", step.rename_column().old_name(),
                   step.rename_column().new_name()));
        break;
      }

        // TODO: EDIT_COLUMN.
      default: {
        return STATUS_SUBSTITUTE(InvalidArgument, "Invalid alter step type: $0", step.type());
      }
    }
  }

  if (req->has_alter_properties()) {
    RETURN_NOT_OK(builder.AlterProperties(req->alter_properties()));
  }
  *new_schema = builder.Build();
  *next_col_id = builder.next_column_id();
  return Status::OK();
}

} // namespace

// RPC entry point for ALTER TABLE: computes the new schema/name/placement,
// persists it with the table in ALTERING state, and fans the change out to
// tablets via SendAlterTableRequest(). Completion is polled via IsAlterTableDone().
Status CatalogManager::AlterTable(const AlterTableRequestPB* req,
                                  AlterTableResponsePB* resp,
                                  rpc::RpcContext* rpc) {
  LOG(INFO) << "Servicing AlterTable request from " << RequestorString(rpc)
            << ": " << req->ShortDebugString();

  std::vector<DdlLogEntry> ddl_log_entries;

  // Lookup the table and verify if it exists.
  TRACE("Looking up table");
  scoped_refptr<TableInfo> table = VERIFY_RESULT(FindTable(req->table()));

  NamespaceId new_namespace_id;

  if (req->has_new_namespace()) {
    // Lookup the new namespace and verify if it exists.
    TRACE("Looking up new namespace");
    scoped_refptr<NamespaceInfo> ns;
    NamespaceIdentifierPB namespace_identifier = req->new_namespace();
    // Use original namespace_id as new_namespace_id for YSQL tables.
    if (table->GetTableType() == PGSQL_TABLE_TYPE && !namespace_identifier.has_id()) {
      namespace_identifier.set_id(table->namespace_id());
    }
    ns = VERIFY_NAMESPACE_FOUND(FindNamespace(namespace_identifier), resp);

    auto ns_lock = ns->LockForRead();
    new_namespace_id = ns->id();
    // Don't use Namespaces that aren't running.
    if (ns->state() != SysNamespaceEntryPB::RUNNING) {
      Status s = STATUS_SUBSTITUTE(TryAgain,
          "Namespace not running (State=$0). Cannot create $1.$2",
          SysNamespaceEntryPB::State_Name(ns->state()), ns->name(), table->name() );
      return SetupError(resp->mutable_error(), NamespaceMasterError(ns->state()), s);
    }
  }

  // A rename requires a resolvable target namespace.
  if (req->has_new_namespace() || req->has_new_table_name()) {
    if (new_namespace_id.empty()) {
      const Status s = STATUS(InvalidArgument, "No namespace used");
      return SetupError(resp->mutable_error(), MasterErrorPB::NO_NAMESPACE_USED, s);
    }
  }

  TRACE("Locking table");
  auto l = table->LockForWrite();
  RETURN_NOT_OK(CheckIfTableDeletedOrNotVisibleToClient(l, resp));

  bool has_changes = false;
  auto& table_pb = l.mutable_data()->pb;
  const TableName table_name = l->name();
  const NamespaceId namespace_id = l->namespace_id();
  const TableName new_table_name = req->has_new_table_name() ? req->new_table_name() : table_name;

  // Calculate new schema for the on-disk state, not persisted yet.
  Schema new_schema;
  ColumnId next_col_id = ColumnId(l->pb.next_column_id());
  if (req->alter_schema_steps_size() || req->has_alter_properties()) {
    TRACE("Apply alter schema");
    Status s = ApplyAlterSteps(
        master_->clock(), table->id(), l->pb, req, &new_schema, &next_col_id, &ddl_log_entries);
    if (!s.ok()) {
      return SetupError(resp->mutable_error(), MasterErrorPB::INVALID_SCHEMA, s);
    }
    DCHECK_NE(next_col_id, 0);
    DCHECK_EQ(new_schema.find_column_by_id(next_col_id),
              static_cast<int>(Schema::kColumnNotFound));
    has_changes = true;
  }

  // Try to acquire the new table name.
  if (req->has_new_namespace() || req->has_new_table_name()) {

    // Postgres handles name uniqueness constraints in it's own layer.
    if (l->table_type() != PGSQL_TABLE_TYPE) {
      LockGuard lock(mutex_);
      VLOG_WITH_FUNC(3) << "Acquired the catalog manager lock";
      TRACE("Acquired catalog manager lock");

      // Verify that the table does not exist.
scoped_refptr<TableInfo> other_table = FindPtrOrNull( table_names_map_, {new_namespace_id, new_table_name}); if (other_table != nullptr) { Status s = STATUS_SUBSTITUTE(AlreadyPresent, "Object '$0.$1' already exists", GetNamespaceNameUnlocked(new_namespace_id), other_table->name()); LOG(WARNING) << "Found table: " << other_table->ToStringWithState() << ". Failed alterring table with error: " << s.ToString() << " Request:\n" << req->DebugString(); return SetupError(resp->mutable_error(), MasterErrorPB::OBJECT_ALREADY_PRESENT, s); } // Acquire the new table name (now we have 2 name for the same table). table_names_map_[{new_namespace_id, new_table_name}] = table; } table_pb.set_namespace_id(new_namespace_id); table_pb.set_name(new_table_name); has_changes = true; } // Check if there has been any changes to the placement policies for this table. if (req->has_replication_info()) { // If this is a colocated table, it does not make sense to set placement // policy for this table, as the tablet associated with it is shared by // multiple tables. if (table->colocated()) { const Status s = STATUS(InvalidArgument, "Placement policy cannot be altered for a colocated table"); return SetupError(resp->mutable_error(), MasterErrorPB::INVALID_REQUEST, s); } if (table->GetTableType() == PGSQL_TABLE_TYPE) { const Status s = STATUS(InvalidArgument, "Placement policy cannot be altered for YSQL tables, use Tablespaces"); return SetupError(resp->mutable_error(), MasterErrorPB::INVALID_REQUEST, s); } // Validate table replication info. RETURN_NOT_OK(ValidateTableReplicationInfo(req->replication_info())); table_pb.mutable_replication_info()->CopyFrom(req->replication_info()); has_changes = true; } // TODO(hector): Simplify the AlterSchema workflow to avoid doing the same checks on every layer // this request goes through: https://github.com/YugaByte/yugabyte-db/issues/1882. 
if (req->has_wal_retention_secs()) { if (has_changes) { const Status s = STATUS(InvalidArgument, "wal_retention_secs cannot be altered concurrently with other properties"); return SetupError(resp->mutable_error(), MasterErrorPB::INVALID_REQUEST, s); } // TODO(hector): Handle co-partitioned tables: // https://github.com/YugaByte/yugabyte-db/issues/1905. table_pb.set_wal_retention_secs(req->wal_retention_secs()); has_changes = true; } if (!has_changes) { if (req->has_force_send_alter_request() && req->force_send_alter_request()) { RETURN_NOT_OK(SendAlterTableRequest(table, req)); } // Skip empty requests... return Status::OK(); } // Serialize the schema Increment the version number. if (new_schema.initialized()) { if (!l->pb.has_fully_applied_schema()) { // The idea here is that if we are in the middle of updating the schema // from one state to another, then YBClients will be given the older // version until the schema is updated on all the tablets. // As of Dec 2019, this may lead to some rejected operations/retries during // the index backfill. See #3284 for possible optimizations. MultiStageAlterTable::CopySchemaDetailsToFullyApplied(&table_pb); } SchemaToPB(new_schema, table_pb.mutable_schema()); } // Only increment the version number if it is a schema change (AddTable change goes through a // different path and it's not processed here). if (!req->has_wal_retention_secs()) { table_pb.set_version(table_pb.version() + 1); table_pb.set_updates_only_index_permissions(false); } table_pb.set_next_column_id(next_col_id); l.mutable_data()->set_state( SysTablesEntryPB::ALTERING, Substitute("Alter table version=$0 ts=$1", table_pb.version(), LocalTimeAsString())); // Update sys-catalog with the new table schema. 
TRACE("Updating metadata on disk"); std::vector<const DdlLogEntry*> ddl_log_entry_pointers; ddl_log_entry_pointers.reserve(ddl_log_entries.size()); for (const auto& entry : ddl_log_entries) { ddl_log_entry_pointers.push_back(&entry); } Status s = sys_catalog_->Upsert(leader_ready_term(), ddl_log_entry_pointers, table); if (!s.ok()) { s = s.CloneAndPrepend( Substitute("An error occurred while updating sys-catalog tables entry: $0", s.ToString())); LOG(WARNING) << s.ToString(); if (table->GetTableType() != PGSQL_TABLE_TYPE && (req->has_new_namespace() || req->has_new_table_name())) { LockGuard lock(mutex_); VLOG_WITH_FUNC(3) << "Acquired the catalog manager lock"; CHECK_EQ(table_names_map_.erase({new_namespace_id, new_table_name}), 1); } // TableMetadaLock follows RAII paradigm: when it leaves scope, // 'l' will be unlocked, and the mutation will be aborted. return CheckIfNoLongerLeaderAndSetupError(s, resp); } // Remove the old name. Not present if PGSQL. if (table->GetTableType() != PGSQL_TABLE_TYPE && (req->has_new_namespace() || req->has_new_table_name())) { TRACE("Removing (namespace, table) combination ($0, $1) from by-name map", namespace_id, table_name); LockGuard lock(mutex_); table_names_map_.erase({namespace_id, table_name}); } // Update the in-memory state. TRACE("Committing in-memory state"); l.Commit(); RETURN_NOT_OK(SendAlterTableRequest(table, req)); LOG(INFO) << "Successfully initiated ALTER TABLE (pending tablet schema updates) for " << table->ToString() << " per request from " << RequestorString(rpc); return Status::OK(); } Status CatalogManager::IsAlterTableDone(const IsAlterTableDoneRequestPB* req, IsAlterTableDoneResponsePB* resp) { // 1. Lookup the table and verify if it exists. TRACE("Looking up table"); scoped_refptr<TableInfo> table = VERIFY_RESULT(FindTable(req->table())); TRACE("Locking table"); auto l = table->LockForRead(); RETURN_NOT_OK(CheckIfTableDeletedOrNotVisibleToClient(l, resp)); // 2. Verify if the alter is in-progress. 
  TRACE("Verify if there is an alter operation in progress for $0", table->ToString());
  resp->set_schema_version(l->pb.version());
  // Done once the table has transitioned out of ALTERING.
  resp->set_done(l->pb.state() != SysTablesEntryPB::ALTERING);

  return Status::OK();
}

// Creates and registers the metadata for one post-split child tablet of
// 'source_tablet_info', persisting the parent table's bumped partition-list
// version before the new tablet itself (see crash-ordering comment below).
Result<TabletInfo*> CatalogManager::RegisterNewTabletForSplit(
    TabletInfo* source_tablet_info, const PartitionPB& partition,
    TableInfo::WriteLock* table_write_lock) {
  const auto tablet_lock = source_tablet_info->LockForRead();

  auto table = source_tablet_info->table();
  TabletInfo* new_tablet;
  {
    LockGuard lock(mutex_);
    new_tablet = CreateTabletInfo(table.get(), partition);
  }
  const auto& source_tablet_meta = tablet_lock->pb;

  // The child starts from the parent's consensus state, one level deeper in the split tree.
  auto& new_tablet_meta = new_tablet->mutable_metadata()->mutable_dirty()->pb;
  new_tablet_meta.set_state(SysTabletsEntryPB::CREATING);
  new_tablet_meta.mutable_committed_consensus_state()->CopyFrom(
      source_tablet_meta.committed_consensus_state());
  new_tablet_meta.set_split_depth(source_tablet_meta.split_depth() + 1);
  new_tablet_meta.set_split_parent_tablet_id(source_tablet_info->tablet_id());
  // TODO(tsplit): consider and handle failure scenarios, for example:
  // - Crash or leader failover before sending out the split tasks.
  // - Long enough partition while trying to send out the splits so that they timeout and
  //   not get executed.
  {
    LockGuard lock(mutex_);
    auto& table_pb = table_write_lock->mutable_data()->pb;
    table_pb.set_partition_list_version(table_pb.partition_list_version() + 1);

    RETURN_NOT_OK(sys_catalog_->Upsert(leader_ready_term(), table));
    // If we crash here - we will have new partitions version with the same set of tablets which
    // is harmless.
    // If we first save new_tablet to syscatalog and then crash - we would have table with old
    // partitions version, but new set of tablets which would break invariant that table partitions
    // set is not changed within the same partitions version.
    // TODO: rework this after https://github.com/yugabyte/yugabyte-db/issues/4912 is implemented.
    RETURN_NOT_OK(sys_catalog_->Upsert(leader_ready_term(), new_tablet));
    table->AddTablet(new_tablet);
    // TODO: We use this pattern in other places, but what if concurrent thread accesses not yet
    // committed TabletInfo from the `table` ?
    new_tablet->mutable_metadata()->CommitMutation();
    auto tablet_map_checkout = tablet_map_.CheckOut();
    (*tablet_map_checkout)[new_tablet->id()] = new_tablet;
  }
  LOG(INFO) << "Registered new tablet " << new_tablet->tablet_id() << " ("
            << AsString(partition) << ") to split the tablet " << source_tablet_info->tablet_id()
            << " (" << AsString(source_tablet_meta.partition()) << ") for table "
            << table->ToString();

  return new_tablet;
}

// Public schema lookup. Chooses whether to report fully-applied or in-flight
// index metadata based on the table type, then delegates to GetTableSchemaInternal().
Status CatalogManager::GetTableSchema(const GetTableSchemaRequestPB* req,
                                      GetTableSchemaResponsePB* resp) {
  VLOG(1) << "Servicing GetTableSchema request for " << req->ShortDebugString();

  // Lookup the table and verify if it exists.
  TRACE("Looking up table");
  scoped_refptr<TableInfo> table = VERIFY_RESULT(FindTable(req->table()));

  // Due to differences in the way proxies handle version mismatch (pull for yql vs push for sql).
  // For YQL tables, we will return the "set of indexes" being applied instead of the ones
  // that are fully completed.
  // For PGSQL (and other) tables we want to return the fully applied schema.
  const bool get_fully_applied_indexes = table->GetTableType() != TableType::YQL_TABLE_TYPE;
  return GetTableSchemaInternal(req, resp, get_fully_applied_indexes);
}

// Fills 'resp' with the table's schema, indexes, partition and replication info.
// When 'get_fully_applied_indexes' is set and an alter is in flight, reports the
// last schema/indexes known to be applied on every tablet instead of the pending ones.
Status CatalogManager::GetTableSchemaInternal(const GetTableSchemaRequestPB* req,
                                              GetTableSchemaResponsePB* resp,
                                              bool get_fully_applied_indexes) {
  VLOG(1) << "Servicing GetTableSchema request for " << req->ShortDebugString();

  // Lookup the table and verify if it exists.
TRACE("Looking up table"); scoped_refptr<TableInfo> table = VERIFY_RESULT(FindTable(req->table())); TRACE("Locking table"); auto l = table->LockForRead(); RETURN_NOT_OK(CheckIfTableDeletedOrNotVisibleToClient(l, resp)); if (l->pb.has_fully_applied_schema()) { // An AlterTable is in progress; fully_applied_schema is the last // schema that has reached every TS. DCHECK(l->pb.state() == SysTablesEntryPB::ALTERING); resp->mutable_schema()->CopyFrom(l->pb.fully_applied_schema()); } else { // There's no AlterTable, the regular schema is "fully applied". resp->mutable_schema()->CopyFrom(l->pb.schema()); } if (get_fully_applied_indexes && l->pb.has_fully_applied_schema()) { resp->set_version(l->pb.fully_applied_schema_version()); resp->mutable_indexes()->CopyFrom(l->pb.fully_applied_indexes()); if (l->pb.has_fully_applied_index_info()) { resp->set_obsolete_indexed_table_id(GetIndexedTableId(l->pb)); *resp->mutable_index_info() = l->pb.fully_applied_index_info(); } VLOG(1) << "Returning" << "\nfully_applied_schema with version " << l->pb.fully_applied_schema_version() << ":\n" << yb::ToString(l->pb.fully_applied_indexes()) << "\ninstead of schema with version " << l->pb.version() << ":\n" << yb::ToString(l->pb.indexes()); } else { resp->set_version(l->pb.version()); resp->mutable_indexes()->CopyFrom(l->pb.indexes()); if (l->pb.has_index_info()) { resp->set_obsolete_indexed_table_id(GetIndexedTableId(l->pb)); *resp->mutable_index_info() = l->pb.index_info(); } VLOG(3) << "Returning" << "\nschema with version " << l->pb.version() << ":\n" << yb::ToString(l->pb.indexes()); } resp->set_is_compatible_with_previous_version(l->pb.updates_only_index_permissions()); resp->mutable_partition_schema()->CopyFrom(l->pb.partition_schema()); if (IsReplicationInfoSet(l->pb.replication_info())) { resp->mutable_replication_info()->CopyFrom(l->pb.replication_info()); } resp->set_create_table_done(!table->IsCreateInProgress()); resp->set_table_type(table->metadata().state().pb.table_type()); 
resp->mutable_identifier()->set_table_name(l->pb.name()); resp->mutable_identifier()->set_table_id(table->id()); resp->mutable_identifier()->mutable_namespace_()->set_id(table->namespace_id()); auto nsinfo = FindNamespaceById(table->namespace_id()); if (nsinfo.ok()) { resp->mutable_identifier()->mutable_namespace_()->set_name((**nsinfo).name()); } // Get namespace name by id. SharedLock lock(mutex_); TRACE("Looking up namespace"); const scoped_refptr<NamespaceInfo> ns = FindPtrOrNull(namespace_ids_map_, table->namespace_id()); if (ns == nullptr) { Status s = STATUS_SUBSTITUTE( NotFound, "Could not find namespace by namespace id $0 for request $1.", table->namespace_id(), req->DebugString()); return SetupError(resp->mutable_error(), MasterErrorPB::NAMESPACE_NOT_FOUND, s); } resp->mutable_identifier()->mutable_namespace_()->set_name(ns->name()); resp->set_colocated(table->colocated()); VLOG(1) << "Serviced GetTableSchema request for " << req->ShortDebugString() << " with " << yb::ToString(*resp); return Status::OK(); } Status CatalogManager::GetColocatedTabletSchema(const GetColocatedTabletSchemaRequestPB* req, GetColocatedTabletSchemaResponsePB* resp) { VLOG(1) << "Servicing GetColocatedTabletSchema request for " << req->ShortDebugString(); // Lookup the given parent colocated table and verify if it exists. TRACE("Looking up table"); auto parent_colocated_table = VERIFY_RESULT(FindTable(req->parent_colocated_table())); { TRACE("Locking table"); auto l = parent_colocated_table->LockForRead(); RETURN_NOT_OK(CheckIfTableDeletedOrNotVisibleToClient(l, resp)); } if (!parent_colocated_table->colocated() || !IsColocatedParentTable(*parent_colocated_table)) { return SetupError(resp->mutable_error(), MasterErrorPB::INVALID_TABLE_TYPE, STATUS(InvalidArgument, "Table provided is not a parent colocated table")); } // Next get all the user tables that are in the database. 
ListTablesRequestPB listTablesReq; ListTablesResponsePB ListTablesResp; listTablesReq.mutable_namespace_()->set_id(parent_colocated_table->namespace_id()); listTablesReq.mutable_namespace_()->set_database_type(YQL_DATABASE_PGSQL); listTablesReq.set_exclude_system_tables(true); Status status = ListTables(&listTablesReq, &ListTablesResp); if (!status.ok() || ListTablesResp.has_error()) { LOG(ERROR) << "Error while listing tables: " << status; return SetupError(resp->mutable_error(), MasterErrorPB::OBJECT_NOT_FOUND, status); } // Get the table schema for each colocated table. for (const auto& t : ListTablesResp.tables()) { // Need to check if this table is colocated first. TRACE("Looking up table"); scoped_refptr<TableInfo> table = VERIFY_RESULT(FindTableById(t.id())); if (table->colocated()) { // Now we can get the schema for this table. GetTableSchemaRequestPB schemaReq; GetTableSchemaResponsePB schemaResp; schemaReq.mutable_table()->set_table_id(t.id()); status = GetTableSchema(&schemaReq, &schemaResp); if (!status.ok() || schemaResp.has_error()) { LOG(ERROR) << "Error while getting table schema: " << status; return SetupError(resp->mutable_error(), MasterErrorPB::OBJECT_NOT_FOUND, status); } resp->add_get_table_schema_response_pbs()->Swap(&schemaResp); } } return Status::OK(); } Status CatalogManager::ListTables(const ListTablesRequestPB* req, ListTablesResponsePB* resp) { NamespaceId namespace_id; // Validate namespace. if (req->has_namespace_()) { // Lookup the namespace and verify if it exists. auto ns = VERIFY_NAMESPACE_FOUND(FindNamespace(req->namespace_()), resp); auto ns_lock = ns->LockForRead(); namespace_id = ns->id(); // Don't list tables with a namespace that isn't running. 
    if (ns->state() != SysNamespaceEntryPB::RUNNING) {
      // Not an error: respond OK with an empty table list.
      LOG(INFO) << "ListTables request for a Namespace not running (State="
                << SysNamespaceEntryPB::State_Name(ns->state()) << ")";
      return Status::OK();
    }
  }

  // With no relation-type filter everything except (optionally) system tables
  // is included; with a filter, only the explicitly requested relation types.
  bool has_rel_filter = req->relation_type_filter_size() > 0;
  bool include_user_table = has_rel_filter ? false : true;
  bool include_user_index = has_rel_filter ? false : true;
  bool include_system_table = req->exclude_system_tables() ? false
      : (has_rel_filter ? false : true);

  for (const auto &relation : req->relation_type_filter()) {
    if (relation == SYSTEM_TABLE_RELATION) {
      include_system_table = true;
    } else if (relation == USER_TABLE_RELATION) {
      include_user_table = true;
    } else if (relation == INDEX_TABLE_RELATION) {
      include_user_index = true;
    }
  }

  SharedLock lock(mutex_);
  RelationType relation_type;

  for (const auto& entry : *table_ids_map_) {
    auto& table_info = *entry.second;
    auto ltm = table_info.LockForRead();

    if (!ltm->visible_to_client() && !req->include_not_running()) {
      continue;
    }

    if (!namespace_id.empty() && namespace_id != table_info.namespace_id()) {
      continue; // Skip tables from other namespaces.
    }

    if (req->has_name_filter()) {
      // Substring match, not a prefix/exact match.
      size_t found = ltm->name().find(req->name_filter());
      if (found == string::npos) {
        continue;
      }
    }

    // Classify the table and apply the relation-type filter.
    if (IsUserIndexUnlocked(table_info)) {
      if (!include_user_index) {
        continue;
      }
      relation_type = INDEX_TABLE_RELATION;
    } else if (IsUserTableUnlocked(table_info)) {
      if (!include_user_table) {
        continue;
      }
      relation_type = USER_TABLE_RELATION;
    } else {
      if (!include_system_table) {
        continue;
      }
      relation_type = SYSTEM_TABLE_RELATION;
    }

    NamespaceIdentifierPB ns_identifier;
    ns_identifier.set_id(ltm->namespace_id());
    auto ns = FindNamespaceUnlocked(ns_identifier);
    if (!ns.ok() || (**ns).state() != SysNamespaceEntryPB::RUNNING) {
      if (PREDICT_FALSE(FLAGS_TEST_return_error_if_namespace_not_found)) {
        VERIFY_NAMESPACE_FOUND(std::move(ns), resp);
      }
      // Tolerate a missing/non-running namespace: skip the table instead of failing.
      LOG(ERROR) << "Unable to find namespace with id " << ltm->namespace_id()
                 << " for table " << ltm->name();
      continue;
    }

    ListTablesResponsePB::TableInfo *table = resp->add_tables();
    {
      auto namespace_lock = (**ns).LockForRead();
      table->mutable_namespace_()->set_id((**ns).id());
      table->mutable_namespace_()->set_name(namespace_lock->name());
      table->mutable_namespace_()->set_database_type(namespace_lock->pb.database_type());
    }
    table->set_id(entry.second->id());
    table->set_name(ltm->name());
    table->set_table_type(ltm->table_type());
    table->set_relation_type(relation_type);
    table->set_state(ltm->pb.state());
  }
  return Status::OK();
}

// Looks up a table by id under the catalog manager lock; nullptr if absent.
scoped_refptr<TableInfo> CatalogManager::GetTableInfo(const TableId& table_id) {
  SharedLock lock(mutex_);
  return FindPtrOrNull(*table_ids_map_, table_id);
}

// Looks up a table by (namespace name, table name). Not supported for PGSQL
// (names are not unique per namespace there); returns nullptr in that case.
scoped_refptr<TableInfo> CatalogManager::GetTableInfoFromNamespaceNameAndTableName(
    YQLDatabase db_type, const NamespaceName& namespace_name, const TableName& table_name) {
  if (db_type == YQL_DATABASE_PGSQL)
    return nullptr;
  SharedLock lock(mutex_);
  const auto ns = FindPtrOrNull(namespace_names_mapper_[db_type], namespace_name);
  return ns ?
    FindPtrOrNull(table_names_map_, {ns->id(), table_name}) : nullptr;
}

// Same as GetTableInfo but the caller must already hold mutex_.
scoped_refptr<TableInfo> CatalogManager::GetTableInfoUnlocked(const TableId& table_id) {
  return FindPtrOrNull(*table_ids_map_, table_id);
}

// Snapshots all tables, then filters the copy outside the lock according to
// the requested mode (all / running / visible to client).
std::vector<TableInfoPtr> CatalogManager::GetTables(GetTablesMode mode) {
  std::vector<TableInfoPtr> result;
  {
    SharedLock lock(mutex_);
    result.reserve(table_ids_map_->size());
    for (const auto& e : *table_ids_map_) {
      result.push_back(e.second);
    }
  }
  switch (mode) {
    case GetTablesMode::kAll:
      return result;
    case GetTablesMode::kRunning: {
      auto filter = [](const TableInfoPtr& table_info) { return !table_info->is_running(); };
      EraseIf(filter, &result);
      return result;
    }
    case GetTablesMode::kVisibleToClient: {
      auto filter = [](const TableInfoPtr& table_info) {
        return !table_info->LockForRead()->visible_to_client();
      };
      EraseIf(filter, &result);
      return result;
    }
  }
  FATAL_INVALID_ENUM_VALUE(GetTablesMode, mode);
}

// Fills `namespaces` with all namespaces, optionally restricted to RUNNING ones.
void CatalogManager::GetAllNamespaces(std::vector<scoped_refptr<NamespaceInfo>>* namespaces,
                                      bool includeOnlyRunningNamespaces) {
  namespaces->clear();
  SharedLock lock(mutex_);
  for (const NamespaceInfoMap::value_type& e : namespace_ids_map_) {
    if (includeOnlyRunningNamespaces && e.second->state() != SysNamespaceEntryPB::RUNNING) {
      continue;
    }
    namespaces->push_back(e.second);
  }
}

// Fills `types` with all user-defined types.
void CatalogManager::GetAllUDTypes(std::vector<scoped_refptr<UDTypeInfo>>* types) {
  types->clear();
  SharedLock lock(mutex_);
  for (const UDTypeInfoMap::value_type& e : udtype_ids_map_) {
    types->push_back(e.second);
  }
}

std::vector<std::shared_ptr<MonitoredTask>> CatalogManager::GetRecentTasks() {
  return tasks_tracker_->GetTasks();
}

std::vector<std::shared_ptr<MonitoredTask>> CatalogManager::GetRecentJobs() {
  return jobs_tracker_->GetTasks();
}

// Returns the namespace name for an id, or an empty name if unknown.
// Caller must hold mutex_.
NamespaceName CatalogManager::GetNamespaceNameUnlocked(const NamespaceId& id) const  {
  const scoped_refptr<NamespaceInfo> ns = FindPtrOrNull(namespace_ids_map_, id);
  return ns == nullptr ? NamespaceName() : ns->name();
}

NamespaceName CatalogManager::GetNamespaceName(const NamespaceId& id) const {
  TRACE("Acquired catalog manager lock");
  SharedLock lock(mutex_);
  return GetNamespaceNameUnlocked(id);
}

NamespaceName CatalogManager::GetNamespaceNameUnlocked(
    const scoped_refptr<TableInfo>& table) const  {
  return GetNamespaceNameUnlocked(table->namespace_id());
}

NamespaceName CatalogManager::GetNamespaceName(const scoped_refptr<TableInfo>& table) const {
  return GetNamespaceName(table->namespace_id());
}

bool CatalogManager::IsSystemTable(const TableInfo& table) const {
  return table.is_system();
}

// True if table is created by user.
// Table can be regular table or index in this case.
bool CatalogManager::IsUserCreatedTable(const TableInfo& table) const {
  SharedLock lock(mutex_);
  return IsUserCreatedTableUnlocked(table);
}

// Caller must hold mutex_. A user-created table is a PGSQL/YQL table that is
// not a system table, not the sequences table, not in the system namespace,
// and not a colocated/tablegroup parent table.
bool CatalogManager::IsUserCreatedTableUnlocked(const TableInfo& table) const {
  if (table.GetTableType() == PGSQL_TABLE_TYPE || table.GetTableType() == YQL_TABLE_TYPE) {
    if (!IsSystemTable(table) && !IsSequencesSystemTable(table) &&
        GetNamespaceNameUnlocked(table.namespace_id()) != kSystemNamespaceName &&
        !IsColocatedParentTable(table) &&
        !IsTablegroupParentTable(table)) {
      return true;
    }
  }
  return false;
}

bool CatalogManager::IsUserTable(const TableInfo& table) const {
  SharedLock lock(mutex_);
  return IsUserTableUnlocked(table);
}

// User table = user-created and NOT an index (no indexed table id).
bool CatalogManager::IsUserTableUnlocked(const TableInfo& table) const {
  return IsUserCreatedTableUnlocked(table) && table.indexed_table_id().empty();
}

bool CatalogManager::IsUserIndex(const TableInfo& table) const {
  SharedLock lock(mutex_);
  return IsUserIndexUnlocked(table);
}

// User index = user-created and HAS an indexed table id.
bool CatalogManager::IsUserIndexUnlocked(const TableInfo& table) const {
  return IsUserCreatedTableUnlocked(table) && !table.indexed_table_id().empty();
}

// Colocated parent tables are identified by an id suffix.
bool CatalogManager::IsColocatedParentTableId(const TableId& table_id) const {
  return table_id.find(kColocatedParentTableIdSuffix) != std::string::npos;
}

bool CatalogManager::IsColocatedParentTable(const TableInfo& table) const {
  return IsColocatedParentTableId(table.id());
}

// Tablegroup parent tables are likewise identified by an id suffix.
bool CatalogManager::IsTablegroupParentTable(const TableInfo& table) const {
  return table.id().find(kTablegroupParentTableIdSuffix) != std::string::npos;
}

// A colocated USER table is colocated but not one of the parent tables.
bool CatalogManager::IsColocatedUserTable(const TableInfo& table) const {
  return table.colocated() && !IsColocatedParentTable(table)
                           && !IsTablegroupParentTable(table);
}

// True iff this is the special PG sequences_data table, identified by its
// well-known database and table OIDs.
bool CatalogManager::IsSequencesSystemTable(const TableInfo& table) const {
  if (table.GetTableType() == PGSQL_TABLE_TYPE && !IsColocatedParentTable(table)
                                               && !IsTablegroupParentTable(table)) {
    // This case commonly occurs during unit testing. Avoid unnecessary assert within Get().
    if (!IsPgsqlId(table.namespace_id()) || !IsPgsqlId(table.id())) {
      LOG(WARNING) << "Not PGSQL IDs " << table.namespace_id() << ", " << table.id();
      return false;
    }
    Result<uint32_t> database_oid = GetPgsqlDatabaseOid(table.namespace_id());
    if (!database_oid.ok()) {
      LOG(WARNING) << "Invalid Namespace ID " << table.namespace_id();
      return false;
    }
    Result<uint32_t> table_oid = GetPgsqlTableOid(table.id());
    if (!table_oid.ok()) {
      LOG(WARNING) << "Invalid Table ID " << table.id();
      return false;
    }
    if (*database_oid == kPgSequencesDataDatabaseOid && *table_oid == kPgSequencesDataTableOid) {
      return true;
    }
  }
  return false;
}

// Callback invoked when a tserver acknowledges a tablet delete: clears the
// pending-delete marker on the TS descriptor and re-checks whether the whole
// table deletion can now be finalized.
void CatalogManager::NotifyTabletDeleteFinished(const TabletServerId& tserver_uuid,
                                                const TabletId& tablet_id,
                                                const TableInfoPtr& table) {
  shared_ptr<TSDescriptor> ts_desc;
  if (!master_->ts_manager()->LookupTSByUUID(tserver_uuid, &ts_desc)) {
    LOG(WARNING) << "Unable to find tablet server " << tserver_uuid;
  } else if (!ts_desc->IsTabletDeletePending(tablet_id)) {
    LOG(WARNING) << "Pending delete for tablet " << tablet_id << " in ts "
                 << tserver_uuid << " doesn't exist";
  } else {
    LOG(INFO) << "Clearing pending delete for tablet " << tablet_id << " in ts " << tserver_uuid;
    ts_desc->ClearPendingTabletDelete(tablet_id);
  }
  CheckTableDeleted(table);
}
bool CatalogManager::ReplicaMapDiffersFromConsensusState(const scoped_refptr<TabletInfo>& tablet, const ConsensusStatePB& cstate) { auto locs = tablet->GetReplicaLocations(); if (locs->size() != cstate.config().peers_size()) { return true; } for (auto iter = cstate.config().peers().begin(); iter != cstate.config().peers().end(); iter++) { if (locs->find(iter->permanent_uuid()) == locs->end()) { return true; } } return false; } namespace { int64_t GetCommittedConsensusStateOpIdIndex(const ReportedTabletPB& report) { if (!report.has_committed_consensus_state() || !report.committed_consensus_state().config().has_opid_index()) { return consensus::kInvalidOpIdIndex; } return report.committed_consensus_state().config().opid_index(); } } // namespace bool CatalogManager::ProcessCommittedConsensusState( TSDescriptor* ts_desc, bool is_incremental, const ReportedTabletPB& report, const TableInfo::ReadLock& table_lock, const TabletInfoPtr& tablet, const TabletInfo::WriteLock& tablet_lock, std::vector<RetryingTSRpcTaskPtr>* rpcs) { const ConsensusStatePB& prev_cstate = tablet_lock->pb.committed_consensus_state(); ConsensusStatePB cstate = report.committed_consensus_state(); bool tablet_was_mutated = false; // 6a. The master only processes reports for replicas with committed // consensus configurations since it needs the committed index to only // cache the most up-to-date config. Since it's possible for TOMBSTONED // replicas with no ConsensusMetadata on disk to be reported as having no // committed config opid_index, we skip over those replicas. 
  if (!cstate.config().has_opid_index()) {
    LOG(WARNING) << "Missing opid_index in reported config: " << report.ShortDebugString();
    return false;
  }

  // Reject reports whose term or committed config index is behind what we
  // already have (unless the safety flag is disabled for tests).
  // NOTE(review): the log message is missing a space/separator before "cstate="
  // (kept as-is here to preserve behavior byte-for-byte).
  if (PREDICT_TRUE(FLAGS_master_ignore_stale_cstate) &&
        (cstate.current_term() < prev_cstate.current_term() ||
         GetCommittedConsensusStateOpIdIndex(report) < prev_cstate.config().opid_index())) {
    LOG(WARNING) << "Stale heartbeat for Tablet " << tablet->ToString()
                 << " on TS " << ts_desc->permanent_uuid()
                 << "cstate=" << cstate.ShortDebugString()
                 << ", prev_cstate=" << prev_cstate.ShortDebugString();
    return false;
  }

  // 6b. Disregard the leader state if the reported leader is not a member
  // of the committed config.
  if (cstate.leader_uuid().empty() ||
      !IsRaftConfigMember(cstate.leader_uuid(), cstate.config())) {
    cstate.clear_leader_uuid();
    tablet_was_mutated = true;
  }

  // 6c. Mark the tablet as RUNNING if it makes sense to do so.
  //
  // We need to wait for a leader before marking a tablet as RUNNING, or
  // else we could incorrectly consider a tablet created when only a
  // minority of its replicas were successful. In that case, the tablet
  // would be stuck in this bad state forever.
  // - FLAG added to avoid waiting during mock tests.
  if (!tablet_lock->is_running() &&
      report.state() == tablet::RUNNING &&
        (cstate.has_leader_uuid() ||
        !FLAGS_catalog_manager_wait_for_new_tablets_to_elect_leader)) {
    DCHECK_EQ(SysTabletsEntryPB::CREATING, tablet_lock->pb.state())
        << "Tablet in unexpected state: " << tablet->ToString()
        << ": " << tablet_lock->pb.ShortDebugString();
    VLOG(1) << "Tablet " << tablet->ToString() << " is now online";
    tablet_lock.mutable_data()->set_state(SysTabletsEntryPB::RUNNING,
        "Tablet reported with an active leader");
    tablet_was_mutated = true;
  }

  // 6d. Update the consensus state if:
  // - A config change operation was committed (reflected by a change to
  //   the committed config's opid_index).
  // - The new cstate has a leader, and either the old cstate didn't, or
  //   there was a term change.
  if (cstate.config().opid_index() > prev_cstate.config().opid_index() ||
      (cstate.has_leader_uuid() &&
          (!prev_cstate.has_leader_uuid() ||
              cstate.current_term() > prev_cstate.current_term()))) {
    // 6d(i). Retain knowledge of the leader even if it wasn't reported in
    // the latest config.
    //
    // When a config change is reported to the master, it may not include the
    // leader because the follower doing the reporting may not know who the
    // leader is yet (it may have just started up). It is safe to reuse
    // the previous leader if the reported cstate has the same term as the
    // previous cstate, and the leader was known for that term.
    if (cstate.current_term() == prev_cstate.current_term()) {
      if (!cstate.has_leader_uuid() && prev_cstate.has_leader_uuid()) {
        cstate.set_leader_uuid(prev_cstate.leader_uuid());
        // Sanity check to detect consensus divergence bugs.
      } else if (cstate.has_leader_uuid() && prev_cstate.has_leader_uuid() &&
          cstate.leader_uuid() != prev_cstate.leader_uuid()) {
        string msg = Substitute("Previously reported cstate for tablet $0 gave "
                                "a different leader for term $1 than the current cstate. "
                                "Previous cstate: $2. Current cstate: $3.",
                                tablet->ToString(), cstate.current_term(),
                                prev_cstate.ShortDebugString(), cstate.ShortDebugString());
        LOG(DFATAL) << msg;
        return false;
      }
    }

    // 6d(ii). Delete any replicas from the previous config that are not in the new one.
    if (FLAGS_master_tombstone_evicted_tablet_replicas) {
      unordered_set<string> current_member_uuids;
      for (const consensus::RaftPeerPB &peer : cstate.config().peers()) {
        InsertOrDie(&current_member_uuids, peer.permanent_uuid());
      }
      for (const consensus::RaftPeerPB &prev_peer : prev_cstate.config().peers()) {
        const string& peer_uuid = prev_peer.permanent_uuid();
        if (!ContainsKey(current_member_uuids, peer_uuid)) {
          // Don't delete a tablet server that hasn't reported in yet (Bootstrapping).
          shared_ptr<TSDescriptor> dummy_ts_desc;
          if (!master_->ts_manager()->LookupTSByUUID(peer_uuid, &dummy_ts_desc)) {
            continue;
          }
          // Otherwise, the TabletServer needs to remove this peer.
          rpcs->push_back(std::make_shared<AsyncDeleteReplica>(
              master_, AsyncTaskPool(), peer_uuid, tablet->table(), tablet->tablet_id(),
              TABLET_DATA_TOMBSTONED, prev_cstate.config().opid_index(),
              Substitute("TS $0 not found in new config with opid_index $1",
                         peer_uuid, cstate.config().opid_index())));
        }
      }
    }
    // 6d(iii). Update the in-memory ReplicaLocations for this tablet using the new config.
    VLOG(2) << "Updating replicas for tablet " << tablet->tablet_id()
          << " using config reported by " << ts_desc->permanent_uuid()
          << " to that committed in log index " << cstate.config().opid_index()
          << " with leader state from term " << cstate.current_term();
    ReconcileTabletReplicasInLocalMemoryWithReport(
      tablet, ts_desc->permanent_uuid(), cstate, report);

    // 6d(iv). Update the consensus state. Don't use 'prev_cstate' after this.
    LOG(INFO) << "Tablet: " << tablet->tablet_id() << " reported consensus state change."
              << " New consensus state: " << cstate.ShortDebugString()
              << " from " << ts_desc->permanent_uuid();
    *tablet_lock.mutable_data()->pb.mutable_committed_consensus_state() = cstate;
    tablet_was_mutated = true;
  } else {
    // Report opid_index is equal to the previous opid_index. If some
    // replica is reporting the same consensus configuration we already know about, but we
    // haven't yet heard from all the tservers in the config, update the in-memory
    // ReplicaLocations.
    LOG(INFO) << "Peer " << ts_desc->permanent_uuid() << " sent "
              << (is_incremental ? "incremental" : "full tablet")
              << " report for " << tablet->tablet_id()
              << ", prev state op id: " << prev_cstate.config().opid_index()
              << ", prev state term: " << prev_cstate.current_term()
              << ", prev state has_leader_uuid: " << prev_cstate.has_leader_uuid()
              << ". Consensus state: " << cstate.ShortDebugString();
    if (GetAtomicFlag(&FLAGS_enable_register_ts_from_raft) &&
        ReplicaMapDiffersFromConsensusState(tablet, cstate)) {
      ReconcileTabletReplicasInLocalMemoryWithReport(
          tablet, ts_desc->permanent_uuid(), cstate, report);
    } else {
      UpdateTabletReplicaInLocalMemory(ts_desc, &cstate, report, tablet);
    }
  }

  if (FLAGS_use_create_table_leader_hint &&
      !cstate.has_leader_uuid() && cstate.current_term() == 0) {
    StartElectionIfReady(cstate, tablet.get());
  }

  // 7. Send an AlterSchema RPC if the tablet has an old schema version.
  if (report.has_schema_version() &&
      report.schema_version() != table_lock->pb.version()) {
    if (report.schema_version() > table_lock->pb.version()) {
      LOG(ERROR) << "TS " << ts_desc->permanent_uuid()
                 << " has reported a schema version greater than the current one "
                 << " for tablet " << tablet->ToString()
                 << ". Expected version " << table_lock->pb.version()
                 << " got " << report.schema_version()
                 << " (corruption)";
    } else {
      // TODO: For Alter (rolling apply to tablets), this is an expected transitory state.
      LOG(INFO) << "TS " << ts_desc->permanent_uuid()
            << " does not have the latest schema for tablet " << tablet->ToString()
            << ". Expected version " << table_lock->pb.version()
            << " got " << report.schema_version();
    }
    // It's possible that the tablet being reported is a laggy replica, and in fact
    // the leader has already received an AlterTable RPC. That's OK, though --
    // it'll safely ignore it if we send another.
    TransactionId txn_id = TransactionId::Nil();
    if (table_lock->pb.has_transaction() &&
        table_lock->pb.transaction().has_transaction_id()) {
      LOG(INFO) << "Parsing transaction ID for tablet ID " << tablet->tablet_id();
      auto txn_id_res = FullyDecodeTransactionId(table_lock->pb.transaction().transaction_id());
      if (!txn_id_res.ok()) {
        LOG(WARNING) << "Parsing transaction ID failed for tablet ID " << tablet->tablet_id();
        return false;
      }
      txn_id = txn_id_res.get();
    }
    LOG(INFO) << "Triggering AlterTable with transaction ID " << txn_id
              << " due to heartbeat delay for tablet ID " << tablet->tablet_id();
    rpcs->push_back(std::make_shared<AsyncAlterTable>(
        master_, AsyncTaskPool(), tablet, tablet->table(), txn_id));
  }

  return tablet_was_mutated;
}

// Processes one ordered batch of reported tablets [begin, end): acquires all
// needed table read locks (in id order, to avoid deadlock), handles each
// report, persists mutated tablets, then commits/unlocks and handles schema
// version changes in a final pass.
Status CatalogManager::ProcessTabletReportBatch(
    TSDescriptor* ts_desc,
    bool is_incremental,
    ReportedTablets::const_iterator begin,
    ReportedTablets::const_iterator end,
    TabletReportUpdatesPB* full_report_update,
    std::vector<RetryingTSRpcTaskPtr>* rpcs) {
  // 1. First Pass. Iterate in TabletId Order to discover all Table locks we'll need.
  //    Need to acquire both types of locks in Id order to prevent deadlock.
  std::map<TableId, TableInfo::ReadLock> table_read_locks;
  for (auto it = begin; it != end; ++it) {
    auto& lock = table_read_locks[it->info->table()->id()];
    if (!lock.locked()) {
      lock = it->info->table()->LockForRead();
    }
  }

  map<TabletId, TabletInfo::WriteLock> tablet_write_locks; // used for unlock.
  // 2. Second Pass.  Process each tablet. This may not be in the order that the tablets
  // appear in 'full_report', but that has no bearing on correctness.
  vector<TabletInfo*> mutated_tablets; // refcount protected by 'tablet_infos'
  for (auto it = begin; it != end; ++it) {
    const auto& tablet_id = it->tablet_id;
    const TabletInfoPtr& tablet = it->info;
    const ReportedTabletPB& report = *it->report;
    const TableInfoPtr& table = tablet->table();

    // Prepare an heartbeat response entry for this tablet, now that we're going to process it.
    // Every tablet in the report that is processed gets one, even if there are no changes to it.
    ReportedTabletUpdatesPB* update = full_report_update->add_tablets();
    update->set_tablet_id(tablet_id);

    // Get tablet lock on demand.  This works in the batch case because the loop is ordered.
    tablet_write_locks[tablet_id] = tablet->LockForWrite();
    auto& table_lock = table_read_locks[table->id()];
    auto& tablet_lock = tablet_write_locks[tablet_id];

    TRACE_EVENT1("master", "HandleReportedTablet", "tablet_id", report.tablet_id());
    RETURN_NOT_OK_PREPEND(CheckIsLeaderAndReady(),
        Substitute("This master is no longer the leader, unable to handle report for tablet $0",
            tablet_id));

    VLOG(3) << "tablet report: " << report.ShortDebugString();

    // 3. Delete the tablet if it (or its table) have been deleted.
    if (tablet_lock->is_deleted() ||
        table_lock->started_deleting()) {
      const string msg = tablet_lock->pb.state_msg();
      update->set_state_msg(msg);
      LOG(INFO) << "Got report from deleted tablet " << tablet->ToString()
                << " (" << msg << "): Sending delete request for this tablet";
      // TODO(unknown): Cancel tablet creation, instead of deleting, in cases
      // where that might be possible (tablet creation timeout & replacement).
      rpcs->push_back(std::make_shared<AsyncDeleteReplica>(
          master_, AsyncTaskPool(), ts_desc->permanent_uuid(), table, tablet_id,
          TABLET_DATA_DELETED, boost::none, msg));
      continue;
    }

    if (!table_lock->is_running()) {
      const string msg = tablet_lock->pb.state_msg();
      LOG(INFO) << "Got report from tablet " << tablet->tablet_id()
                << " for non-running table " << table->ToString() << ": " << msg;
      update->set_state_msg(msg);
      continue;
    }

    // 3. Tombstone a replica that is no longer part of the Raft config (and
    // not already tombstoned or deleted outright).
    //
    // If the report includes a committed raft config, we only tombstone if
    // the opid_index is strictly less than the latest reported committed
    // config. This prevents us from spuriously deleting replicas that have
    // just been added to the committed config and are in the process of copying.
    const ConsensusStatePB& prev_cstate = tablet_lock->pb.committed_consensus_state();
    const int64_t prev_opid_index = prev_cstate.config().opid_index();
    const int64_t report_opid_index = GetCommittedConsensusStateOpIdIndex(report);
    if (FLAGS_master_tombstone_evicted_tablet_replicas &&
        report.tablet_data_state() != TABLET_DATA_TOMBSTONED &&
        report.tablet_data_state() != TABLET_DATA_DELETED &&
        report_opid_index < prev_opid_index &&
        !IsRaftConfigMember(ts_desc->permanent_uuid(), prev_cstate.config())) {
      const string delete_msg = (report_opid_index == consensus::kInvalidOpIdIndex) ?
          "Replica has no consensus available" :
          Substitute("Replica with old config index $0", report_opid_index);
      rpcs->push_back(std::make_shared<AsyncDeleteReplica>(
          master_, AsyncTaskPool(), ts_desc->permanent_uuid(), table, tablet_id,
          TABLET_DATA_TOMBSTONED, prev_opid_index,
          Substitute("$0 (current committed config index is $1)",
              delete_msg, prev_opid_index)));
      continue;
    }

    // 4. Skip a non-deleted tablet which reports an error.
    if (report.has_error()) {
      Status s = StatusFromPB(report.error());
      DCHECK(!s.ok());
      DCHECK_EQ(report.state(), tablet::FAILED);
      LOG(WARNING) << "Tablet " << tablet->ToString() << " has failed on TS "
                   << ts_desc->permanent_uuid() << ": " << s.ToString();
      continue;
    }

    // 5. Process the report's consensus state.
    // The report will not have a committed_consensus_state if it is in the
    // middle of starting up, such as during tablet bootstrap.
    // If we received an incremental report, and the tablet is starting up, we will update the
    // replica so that the balancer knows how many tablets are in the middle of remote bootstrap.
    if (report.has_committed_consensus_state()) {
      if (ProcessCommittedConsensusState(
          ts_desc, is_incremental, report, table_lock, tablet, tablet_lock, rpcs)) {
        // 6. If the tablet was mutated, add it to the tablets to be re-persisted.
        //
        // Done here and not on a per-mutation basis to avoid duplicate entries.
        mutated_tablets.push_back(tablet.get());
      }
    } else if (is_incremental &&
        (report.state() == tablet::NOT_STARTED || report.state() == tablet::BOOTSTRAPPING)) {
      // When a tablet server is restarted, it sends a full tablet report with all of its tablets
      // in the NOT_STARTED state, so this would make the load balancer think that all the
      // tablets are being remote bootstrapped at once, so only process incremental reports here.
      UpdateTabletReplicaInLocalMemory(ts_desc, nullptr /* consensus */, report, tablet);
    }
  } // Finished one round of batch processing.

  // 7. Unlock the tables; we no longer need to access their state.
  for (auto& l : table_read_locks) {
    l.second.Unlock();
  }
  table_read_locks.clear();

  // 8. Write all tablet mutations to the catalog table.
  //
  // SysCatalogTable::Write will short-circuit the case where the data has not
  // in fact changed since the previous version and avoid any unnecessary mutations.
  if (!mutated_tablets.empty()) {
    Status s = sys_catalog_->Upsert(leader_ready_term(), mutated_tablets);
    if (!s.ok()) {
      LOG(WARNING) << "Error updating tablets: " << s;
      return s;
    }
  }

  // 9. Publish the in-memory tablet mutations and release the locks.
  for (auto& l : tablet_write_locks) {
    l.second.Commit();
  }
  tablet_write_locks.clear();

  // 10. Third Pass. Process all tablet schema version changes.
  // (This is separate from tablet state mutations because only table on-disk state is changed.)
  for (auto it = begin; it != end; ++it) {
    const ReportedTabletPB& report = *it->report;
    if (!report.has_schema_version()) {
      continue;
    }
    const TabletInfoPtr& tablet = it->info;
    auto leader = tablet->GetLeader();
    // Only act on the report coming from the tablet's leader.
    if (leader.ok() && leader.get()->permanent_uuid() == ts_desc->permanent_uuid()) {
      RETURN_NOT_OK(HandleTabletSchemaVersionReport(tablet.get(), report.schema_version()));
    }
  }

  return Status::OK();
}

// Top-level tablet-report handler (continues past the visible window).
Status CatalogManager::ProcessTabletReport(TSDescriptor* ts_desc,
                                           const TabletReportPB& full_report,
                                           TabletReportUpdatesPB* full_report_update,
                                           RpcContext* rpc) {
  int num_tablets = full_report.updated_tablets_size();
  TRACE_EVENT2("master", "ProcessTabletReport",
               "requestor", rpc->requestor_string(),
               "num_tablets", num_tablets);

  VLOG_WITH_PREFIX(2) << "Received tablet report from " << RequestorString(rpc) << "("
                      << ts_desc->permanent_uuid() << "): " << full_report.DebugString();

  if (!ts_desc->has_tablet_report() && full_report.is_incremental()) {
    LOG_WITH_PREFIX(WARNING)
        << "Invalid tablet report from " << ts_desc->permanent_uuid()
        << ": Received an incremental tablet report when a full one was needed";
    // We should respond with success in order to send reply that we need full report.
    return Status::OK();
  }

  // TODO: on a full tablet report, we may want to iterate over the tablets we think
  // the server should have, compare vs the ones being reported, and somehow mark
  // any that have been "lost" (eg somehow the tablet metadata got corrupted or something).

  ReportedTablets reported_tablets;

  // Tablet Deletes to process after the catalog lock below.
  set<TabletId> tablets_to_delete;

  {
    // Lock the catalog to iterate over tablet_ids_map_ & table_ids_map_.
    SharedLock lock(mutex_);

    // Fill the above variables before processing
    full_report_update->mutable_tablets()->Reserve(num_tablets);
    for (const ReportedTabletPB& report : full_report.updated_tablets()) {
      const string& tablet_id = report.tablet_id();

      // 1a. Find the tablet, deleting/skipping it if it can't be found.
scoped_refptr<TabletInfo> tablet = FindPtrOrNull(*tablet_map_, tablet_id); if (!tablet) { // If a TS reported an unknown tablet, send a delete tablet rpc to the TS. LOG(INFO) << "Null tablet reported, possibly the TS was not around when the" " table was being deleted. Sending Delete tablet RPC to this TS."; tablets_to_delete.insert(tablet_id); // Every tablet in the report that is processed gets a heartbeat response entry. ReportedTabletUpdatesPB* update = full_report_update->add_tablets(); update->set_tablet_id(tablet_id); continue; } if (!tablet->table() || FindOrNull(*table_ids_map_, tablet->table()->id()) == nullptr) { auto table_id = tablet->table() == nullptr ? "(null)" : tablet->table()->id(); LOG(INFO) << "Got report from an orphaned tablet " << tablet_id << " on table " << table_id; tablets_to_delete.insert(tablet_id); // Every tablet in the report that is processed gets a heartbeat response entry. ReportedTabletUpdatesPB* update = full_report_update->add_tablets(); update->set_tablet_id(tablet_id); continue; } // 1b. Found the tablet, update local state. reported_tablets.push_back(ReportedTablet { .tablet_id = tablet_id, .info = tablet, .report = &report, }); } } std::sort(reported_tablets.begin(), reported_tablets.end(), [](const auto& lhs, const auto& rhs) { return lhs.tablet_id < rhs.tablet_id; }); // Process any delete requests from orphaned tablets, identified above. for (auto tablet_id : tablets_to_delete) { SendDeleteTabletRequest(tablet_id, TABLET_DATA_DELETED, boost::none, nullptr, ts_desc, "Report from an orphaned tablet"); } // Calculate the deadline for this expensive loop coming up. const auto safe_deadline = rpc->GetClientDeadline() - (FLAGS_heartbeat_rpc_timeout_ms * 1ms * FLAGS_heartbeat_safe_deadline_ratio); // Process tablets by batches. 
  // Walk reported_tablets in fixed-size batches; each batch takes and releases its own
  // locks inside ProcessTabletReportBatch so one huge report cannot hold locks for long.
  for (auto tablet_iter = reported_tablets.begin(); tablet_iter != reported_tablets.end();) {
    auto batch_begin = tablet_iter;
    tablet_iter += std::min<size_t>(
        reported_tablets.end() - tablet_iter, FLAGS_catalog_manager_report_batch_size);

    // Keeps track of all RPCs that should be sent when we're done with a single batch.
    std::vector<RetryingTSRpcTaskPtr> rpcs;
    auto status = ProcessTabletReportBatch(
        ts_desc, full_report.is_incremental(), batch_begin, tablet_iter, full_report_update,
        &rpcs);
    if (!status.ok()) {
      // Abort any queued-but-unsent RPCs before propagating the batch failure.
      for (auto& rpc : rpcs) {
        rpc->AbortAndReturnPrevState(status);
      }
      return status;
    }

    // 13. Send all queued RPCs.
    for (auto& rpc : rpcs) {
      DCHECK(rpc->table());
      rpc->table()->AddTask(rpc);
      WARN_NOT_OK(ScheduleTask(rpc), Substitute("Failed to send $0", rpc->description()));
    }
    rpcs.clear();

    // 14. Check deadline. Need to exit before processing all batches if we're close to timing
    // out.
    if (ts_desc->HasCapability(CAPABILITY_TabletReportLimit) &&
        tablet_iter != reported_tablets.end()) {
      // [TESTING] Inject latency before processing a batch to test deadline.
      if (PREDICT_FALSE(FLAGS_TEST_inject_latency_during_tablet_report_ms > 0)) {
        LOG(INFO) << "Sleeping in CatalogManager::ProcessTabletReport for "
                  << FLAGS_TEST_inject_latency_during_tablet_report_ms << " ms";
        SleepFor(MonoDelta::FromMilliseconds(FLAGS_TEST_inject_latency_during_tablet_report_ms));
      }

      // Return from here at configured safe heartbeat deadline to give the response packet time.
      if (safe_deadline < CoarseMonoClock::Now()) {
        LOG(INFO) << "Reached Heartbeat deadline. Returning early after processing "
                  << full_report_update->tablets_size() << " tablets";
        full_report_update->set_processing_truncated(true);
        return Status::OK();
      }
    }
  } // Loop to process the next batch until fully iterated.

  if (!full_report.is_incremental()) {
    // A full report may take multiple heartbeats.
    // The TS communicates how much is left to process for the full report beyond this specific
    // HB.
    bool completed_full_report = !full_report.has_remaining_tablet_count()
                               || full_report.remaining_tablet_count() == 0;
    if (full_report.updated_tablets_size() == 0) {
      LOG(INFO) << ts_desc->permanent_uuid() << " sent full tablet report with 0 tablets.";
    } else if (!ts_desc->has_tablet_report()) {
      LOG(INFO) << ts_desc->permanent_uuid()
                << (completed_full_report ? " finished" : " receiving") << " first full report: "
                << full_report.updated_tablets_size() << " tablets.";
    }
    // We have a tablet report only once we're done processing all the chunks of the initial
    // report.
    ts_desc->set_has_tablet_report(completed_full_report);
  }

  // 15. Queue background processing if we had updates.
  if (full_report.updated_tablets_size() > 0) {
    background_tasks_->WakeIfHasPendingUpdates();
  }

  return Status::OK();
}

// Creates a tablegroup by creating a hidden "parent" table whose single tablet will host
// all tables subsequently added to the group.
Status CatalogManager::CreateTablegroup(const CreateTablegroupRequestPB* req,
                                        CreateTablegroupResponsePB* resp,
                                        rpc::RpcContext* rpc) {
  CreateTableRequestPB ctreq;
  CreateTableResponsePB ctresp;

  // Sanity check for PB fields.
  if (!req->has_id() || !req->has_namespace_id() || !req->has_namespace_name()) {
    Status s = STATUS(InvalidArgument, "Improper CREATE TABLEGROUP request (missing fields).");
    return SetupError(resp->mutable_error(), MasterErrorPB::INVALID_SCHEMA, s);
  }

  // Use the tablegroup id as the prefix for the parent table id.
  const auto parent_table_id = req->id() + kTablegroupParentTableIdSuffix;
  const auto parent_table_name = req->id() + kTablegroupParentTableNameSuffix;
  ctreq.set_name(parent_table_name);
  ctreq.set_table_id(parent_table_id);
  ctreq.mutable_namespace_()->set_name(req->namespace_name());
  ctreq.mutable_namespace_()->set_id(req->namespace_id());
  ctreq.set_table_type(PGSQL_TABLE_TYPE);
  ctreq.set_tablegroup_id(req->id());

  // The parent table has a single dummy key column; user tables colocate into its tablet.
  YBSchemaBuilder schemaBuilder;
  schemaBuilder.AddColumn("parent_column")->Type(BINARY)->PrimaryKey()->NotNull();
  YBSchema ybschema;
  CHECK_OK(schemaBuilder.Build(&ybschema));
  auto schema = yb::client::internal::GetSchema(ybschema);
  SchemaToPB(schema, ctreq.mutable_schema());
  if (!FLAGS_TEST_tablegroup_master_only) {
    ctreq.mutable_schema()->mutable_table_properties()->set_is_transactional(true);
  }

  // Create a parent table, which will create the tablet.
  Status s = CreateTable(&ctreq, &ctresp, rpc);
  resp->set_parent_table_id(ctresp.table_id());
  resp->set_parent_table_name(parent_table_name);

  // Carry over error.
  if (ctresp.has_error()) {
    resp->mutable_error()->Swap(ctresp.mutable_error());
  }

  // We do not lock here so it is technically possible that the table was already created.
  // If so, there is nothing to do so we just ignore the "AlreadyPresent" error.
if (!s.ok() && !s.IsAlreadyPresent()) { LOG(WARNING) << "Tablegroup creation failed: " << s.ToString(); return s; } // Update catalog manager maps SharedLock lock(mutex_); TRACE("Acquired catalog manager lock"); TablegroupInfo *tg = new TablegroupInfo(req->id(), req->namespace_id()); tablegroup_ids_map_[req->id()] = tg; return s; } Status CatalogManager::DeleteTablegroup(const DeleteTablegroupRequestPB* req, DeleteTablegroupResponsePB* resp, rpc::RpcContext* rpc) { DeleteTableRequestPB dtreq; DeleteTableResponsePB dtresp; // Sanity check for PB fields if (!req->has_id() || !req->has_namespace_id()) { Status s = STATUS(InvalidArgument, "Improper DELETE TABLEGROUP request (missing fields)."); return SetupError(resp->mutable_error(), MasterErrorPB::INVALID_SCHEMA, s); } // Use the tablegroup id as the prefix for the parent table id. const auto parent_table_id = req->id() + kTablegroupParentTableIdSuffix; const auto parent_table_name = req->id() + kTablegroupParentTableNameSuffix; dtreq.mutable_table()->set_table_name(parent_table_name); dtreq.mutable_table()->set_table_id(parent_table_id); dtreq.set_is_index_table(false); Status s = DeleteTable(&dtreq, &dtresp, rpc); resp->set_parent_table_id(dtresp.table_id()); // Carry over error. if (dtresp.has_error()) { resp->mutable_error()->Swap(dtresp.mutable_error()); return s; } // Perform map updates. 
SharedLock lock(mutex_); TRACE("Acquired catalog manager lock"); tablegroup_ids_map_.erase(req->id()); tablegroup_tablet_ids_map_[req->namespace_id()].erase(req->id()); LOG(INFO) << "Deleted table " << parent_table_name; return s; } Status CatalogManager::ListTablegroups(const ListTablegroupsRequestPB* req, ListTablegroupsResponsePB* resp, rpc::RpcContext* rpc) { SharedLock lock(mutex_); if (!req->has_namespace_id()) { Status s = STATUS(InvalidArgument, "Improper ListTablegroups request (missing fields)."); return SetupError(resp->mutable_error(), MasterErrorPB::INVALID_SCHEMA, s); } if (tablegroup_tablet_ids_map_.find(req->namespace_id()) == tablegroup_tablet_ids_map_.end()) { return STATUS(NotFound, "Tablegroups not found for namespace id: ", req->namespace_id()); } for (const auto& entry : tablegroup_tablet_ids_map_[req->namespace_id()]) { const TablegroupId tgid = entry.first; if (tablegroup_ids_map_.find(tgid) == tablegroup_ids_map_.end()) { LOG(WARNING) << "Tablegroup info in " << req->namespace_id() << " not found for tablegroup id: " << tgid; continue; } scoped_refptr<TablegroupInfo> tginfo = tablegroup_ids_map_[tgid]; TablegroupIdentifierPB *tg = resp->add_tablegroups(); tg->set_id(tginfo->id()); tg->set_namespace_id(tginfo->namespace_id()); } return Status::OK(); } bool CatalogManager::HasTablegroups() { SharedLock lock(mutex_); return !tablegroup_ids_map_.empty(); } Status CatalogManager::CreateNamespace(const CreateNamespaceRequestPB* req, CreateNamespaceResponsePB* resp, rpc::RpcContext* rpc) { Status return_status; // Copy the request, so we can fill in some defaults. LOG(INFO) << "CreateNamespace from " << RequestorString(rpc) << ": " << req->DebugString(); scoped_refptr<NamespaceInfo> ns; std::vector<scoped_refptr<TableInfo>> pgsql_tables; TransactionMetadata txn; const auto db_type = GetDatabaseType(*req); { LockGuard lock(mutex_); TRACE("Acquired catalog manager lock"); // Validate the user request. 
// Verify that the namespace does not already exist. ns = FindPtrOrNull(namespace_ids_map_, req->namespace_id()); // Same ID. if (ns == nullptr && db_type != YQL_DATABASE_PGSQL) { // PGSQL databases have name uniqueness handled at a different layer, so ignore overlaps. ns = FindPtrOrNull(namespace_names_mapper_[db_type], req->name()); } if (ns != nullptr) { resp->set_id(ns->id()); return_status = STATUS_SUBSTITUTE(AlreadyPresent, "Keyspace '$0' already exists", req->name()); LOG(WARNING) << "Found keyspace: " << ns->id() << ". Failed creating keyspace with error: " << return_status.ToString() << " Request:\n" << req->DebugString(); return SetupError(resp->mutable_error(), MasterErrorPB::NAMESPACE_ALREADY_PRESENT, return_status); } // Add the new namespace. // Create unique id for this new namespace. NamespaceId new_id = !req->namespace_id().empty() ? req->namespace_id() : GenerateIdUnlocked(SysRowEntry::NAMESPACE); ns = new NamespaceInfo(new_id); ns->mutable_metadata()->StartMutation(); SysNamespaceEntryPB *metadata = &ns->mutable_metadata()->mutable_dirty()->pb; metadata->set_name(req->name()); metadata->set_database_type(db_type); metadata->set_colocated(req->colocated()); metadata->set_state(SysNamespaceEntryPB::PREPARING); // For namespace created for a Postgres database, save the list of tables and indexes for // for the database that need to be copied. if (db_type == YQL_DATABASE_PGSQL) { if (req->source_namespace_id().empty()) { metadata->set_next_pg_oid(req->next_pg_oid()); } else { const auto source_oid = GetPgsqlDatabaseOid(req->source_namespace_id()); if (!source_oid.ok()) { return SetupError(resp->mutable_error(), MasterErrorPB::NAMESPACE_NOT_FOUND, source_oid.status()); } for (const auto& iter : *table_ids_map_) { const auto& table_id = iter.first; const auto& table = iter.second; if (IsPgsqlId(table_id) && CHECK_RESULT(GetPgsqlDatabaseOid(table_id)) == *source_oid) { // Since indexes have dependencies on the base tables, put the tables in the front. 
const bool is_table = table->indexed_table_id().empty(); pgsql_tables.insert(is_table ? pgsql_tables.begin() : pgsql_tables.end(), table); } } scoped_refptr<NamespaceInfo> source_ns = FindPtrOrNull(namespace_ids_map_, req->source_namespace_id()); if (!source_ns) { return SetupError(resp->mutable_error(), MasterErrorPB::NAMESPACE_NOT_FOUND, STATUS(NotFound, "Source keyspace not found", req->source_namespace_id())); } auto source_ns_lock = source_ns->LockForRead(); metadata->set_next_pg_oid(source_ns_lock->pb.next_pg_oid()); } } // NS with a Transaction should be rolled back if the transaction does not get Committed. // Store this on the NS for now and use it later. if (req->has_transaction() && PREDICT_TRUE(FLAGS_enable_transactional_ddl_gc)) { metadata->mutable_transaction()->CopyFrom(req->transaction()); txn = VERIFY_RESULT(TransactionMetadata::FromPB(req->transaction())); RSTATUS_DCHECK(!txn.status_tablet.empty(), Corruption, "Given incomplete Transaction"); } // Add the namespace to the in-memory map for the assignment. namespace_ids_map_[ns->id()] = ns; namespace_names_mapper_[db_type][req->name()] = ns; resp->set_id(ns->id()); } TRACE("Inserted new keyspace info into CatalogManager maps"); // Update the on-disk system catalog. return_status = sys_catalog_->Upsert(leader_ready_term(), ns);; if (!return_status.ok()) { LOG(WARNING) << "Keyspace creation failed:" << return_status.ToString(); { LockGuard lock(mutex_); namespace_ids_map_.erase(ns->id()); namespace_names_mapper_[db_type].erase(req->name()); } ns->mutable_metadata()->AbortMutation(); return CheckIfNoLongerLeaderAndSetupError(return_status, resp); } TRACE("Wrote keyspace to sys-catalog"); // Commit the namespace in-memory state. 
  ns->mutable_metadata()->CommitMutation();

  LOG(INFO) << "Created keyspace " << ns->ToString();

  if (req->has_creator_role_name()) {
    RETURN_NOT_OK(permissions_manager_->GrantPermissions(
        req->creator_role_name(),
        get_canonical_keyspace(req->name()),
        req->name() /* resource name */,
        req->name() /* keyspace name */,
        all_permissions_for_resource(ResourceType::KEYSPACE),
        ResourceType::KEYSPACE,
        resp));
  }

  // Colocated databases need to create a parent tablet to serve as the base storage location.
  if (req->colocated()) {
    // NOTE: these locals deliberately shadow the function parameters `req`/`resp`; the
    // outer request is no longer needed in this branch.
    CreateTableRequestPB req;
    CreateTableResponsePB resp;
    const auto parent_table_id = ns->id() + kColocatedParentTableIdSuffix;
    const auto parent_table_name = ns->id() + kColocatedParentTableNameSuffix;
    req.set_name(parent_table_name);
    req.set_table_id(parent_table_id);
    req.mutable_namespace_()->set_name(ns->name());
    req.mutable_namespace_()->set_id(ns->id());
    req.set_table_type(GetTableTypeForDatabase(ns->database_type()));
    req.set_colocated(true);

    YBSchemaBuilder schemaBuilder;
    schemaBuilder.AddColumn("parent_column")->Type(BINARY)->PrimaryKey()->NotNull();
    YBSchema ybschema;
    CHECK_OK(schemaBuilder.Build(&ybschema));
    auto schema = yb::client::internal::GetSchema(ybschema);
    SchemaToPB(schema, req.mutable_schema());
    req.mutable_schema()->mutable_table_properties()->set_is_transactional(true);

    // create a parent table, which will create the tablet.
    Status s = CreateTable(&req, &resp, rpc);

    // We do not lock here so it is technically possible that the table was already created.
    // If so, there is nothing to do so we just ignore the "AlreadyPresent" error.
    if (!s.ok() && !s.IsAlreadyPresent()) {
      LOG(WARNING) << "Keyspace creation failed:" << s.ToString();
      // TODO: We should verify this behavior works end-to-end.
      // Diverging in-memory state from disk so the user can issue a delete if no new leader.
auto l = ns->LockForWrite(); SysNamespaceEntryPB& metadata = ns->mutable_metadata()->mutable_dirty()->pb; metadata.set_state(SysNamespaceEntryPB::FAILED); l.Commit(); return s; } } if ((db_type == YQL_DATABASE_PGSQL && !pgsql_tables.empty()) || PREDICT_FALSE(GetAtomicFlag(&FLAGS_TEST_hang_on_namespace_transition))) { // Process the subsequent work in the background thread (normally PGSQL). LOG(INFO) << "Keyspace create enqueued for later processing: " << ns->ToString(); RETURN_NOT_OK(background_tasks_thread_pool_->SubmitFunc( std::bind(&CatalogManager::ProcessPendingNamespace, this, ns->id(), pgsql_tables, txn))); return Status::OK(); } else { // All work is done, it's now safe to online the namespace (normally YQL). auto l = ns->LockForWrite(); SysNamespaceEntryPB& metadata = ns->mutable_metadata()->mutable_dirty()->pb; if (metadata.state() == SysNamespaceEntryPB::PREPARING) { metadata.set_state(SysNamespaceEntryPB::RUNNING); return_status = sys_catalog_->Upsert(leader_ready_term(), ns);; if (!return_status.ok()) { // Diverging in-memory state from disk so the user can issue a delete if no new leader. LOG(WARNING) << "Keyspace creation failed:" << return_status.ToString(); metadata.set_state(SysNamespaceEntryPB::FAILED); return_status = CheckIfNoLongerLeaderAndSetupError(return_status, resp); } else { TRACE("Activated keyspace in sys-catalog"); LOG(INFO) << "Activated keyspace: " << ns->ToString(); } // Commit the namespace in-memory state. l.Commit(); } else { LOG(WARNING) << "Keyspace has invalid state (" << metadata.state() << "), aborting create"; } } return return_status; } void CatalogManager::ProcessPendingNamespace( NamespaceId id, std::vector<scoped_refptr<TableInfo>> template_tables, TransactionMetadata txn) { LOG(INFO) << "ProcessPendingNamespace started for " << id; // Ensure that we are currently the Leader before handling DDL operations. 
{ SCOPED_LEADER_SHARED_LOCK(l, this); if (!l.catalog_status().ok()) { LOG(WARNING) << "Catalog status failure: " << l.catalog_status().ToString(); // Don't try again, we have to reset in-memory state after losing leader election. return; } if (!l.leader_status().ok()) { LOG(WARNING) << "Leader status failure: " << l.leader_status().ToString(); // Don't try again, we have to reset in-memory state after losing leader election. return; } } if (PREDICT_FALSE(GetAtomicFlag(&FLAGS_TEST_hang_on_namespace_transition))) { LOG(INFO) << "Artificially waiting (" << FLAGS_catalog_manager_bg_task_wait_ms << "ms) on namespace creation for " << id; SleepFor(MonoDelta::FromMilliseconds(FLAGS_catalog_manager_bg_task_wait_ms)); WARN_NOT_OK(background_tasks_thread_pool_->SubmitFunc( std::bind(&CatalogManager::ProcessPendingNamespace, this, id, template_tables, txn)), "Could not submit ProcessPendingNamespaces to thread pool"); return; } scoped_refptr<NamespaceInfo> ns; { LockGuard lock(mutex_); ns = FindPtrOrNull(namespace_ids_map_, id);; } if (ns == nullptr) { LOG(WARNING) << "Pending Namespace not found to finish creation: " << id; return; } // Copy the system tables necessary to create this namespace. This can be time-intensive. bool success = true; if (!template_tables.empty()) { auto s = CopyPgsqlSysTables(ns->id(), template_tables); WARN_NOT_OK(s, "Error Copying PGSQL System Tables for Pending Namespace"); success = s.ok(); } // All work is done, change the namespace state regardless of success or failure. { auto l = ns->LockForWrite(); SysNamespaceEntryPB& metadata = ns->mutable_metadata()->mutable_dirty()->pb; if (metadata.state() == SysNamespaceEntryPB::PREPARING) { metadata.set_state(success ? SysNamespaceEntryPB::RUNNING : SysNamespaceEntryPB::FAILED); auto s = sys_catalog_->Upsert(leader_ready_term(), ns);; if (s.ok()) { TRACE("Done processing keyspace"); LOG(INFO) << (success ? 
"Processed" : "Failed") << " keyspace: " << ns->ToString(); // Verify Transaction gets committed, which occurs after namespace create finishes. if (success && metadata.has_transaction()) { LOG(INFO) << "Enqueuing keyspace for Transaction Verification: " << ns->ToString(); std::function<Status(bool)> when_done = std::bind(&CatalogManager::VerifyNamespacePgLayer, this, ns, _1); WARN_NOT_OK(background_tasks_thread_pool_->SubmitFunc( std::bind(&YsqlTransactionDdl::VerifyTransaction, &ysql_transaction_, txn, when_done)), "Could not submit VerifyTransaction to thread pool"); } } else { metadata.set_state(SysNamespaceEntryPB::FAILED); if (s.IsIllegalState() || s.IsAborted()) { s = STATUS(ServiceUnavailable, "operation requested can only be executed on a leader master, but this" " master is no longer the leader", s.ToString()); } else { s = s.CloneAndPrepend(Substitute( "An error occurred while modifying keyspace to $0 in sys-catalog: $1", metadata.state(), s.ToString())); } LOG(WARNING) << s.ToString(); } // Commit the namespace in-memory state. l.Commit(); } else { LOG(WARNING) << "Bad keyspace state (" << metadata.state() << "), abandoning creation work for " << ns->ToString(); } } } Status CatalogManager::VerifyNamespacePgLayer( scoped_refptr<NamespaceInfo> ns, bool rpc_success) { // Upon Transaction completion, check pg system table using OID to ensure SUCCESS. const auto pg_table_id = GetPgsqlTableId(atoi(kSystemNamespaceId), kPgDatabaseTableOid); auto entry_exists = VERIFY_RESULT( ysql_transaction_.PgEntryExists(pg_table_id, GetPgsqlDatabaseOid(ns->id()))); auto l = ns->LockForWrite(); SysNamespaceEntryPB& metadata = ns->mutable_metadata()->mutable_dirty()->pb; // #5981: Mark un-retryable rpc failures as pass to avoid infinite retry of GC'd txns. bool txn_check_passed = entry_exists || !rpc_success; if (txn_check_passed) { // Passed checks. Remove the transaction from the entry since we're done processing it. 
SCHECK_EQ(metadata.state(), SysNamespaceEntryPB::RUNNING, Aborted, Substitute("Invalid Namespace state ($0), abandoning transaction GC work for $1", SysNamespaceEntryPB_State_Name(metadata.state()), ns->ToString())); metadata.clear_transaction(); RETURN_NOT_OK(sys_catalog_->Upsert(leader_ready_term(), ns)); if (entry_exists) { LOG(INFO) << "Namespace transaction succeeded: " << ns->ToString(); } else { LOG(WARNING) << "Unknown RPC Failure, removing transaction on namespace: " << ns->ToString(); } // Commit the namespace in-memory state. l.Commit(); } else { // Transaction failed. We need to delete this Database now. SCHECK(metadata.state() == SysNamespaceEntryPB::RUNNING || metadata.state() == SysNamespaceEntryPB::FAILED, Aborted, Substitute("Invalid Namespace state ($0), aborting delete.", SysNamespaceEntryPB_State_Name(metadata.state()), ns->ToString())); LOG(INFO) << "Namespace transaction failed, deleting: " << ns->ToString(); metadata.set_state(SysNamespaceEntryPB::DELETING); metadata.clear_transaction(); RETURN_NOT_OK(sys_catalog_->Upsert(leader_ready_term(), ns)); // Commit the namespace in-memory state. l.Commit(); // Async enqueue delete. RETURN_NOT_OK(background_tasks_thread_pool_->SubmitFunc( std::bind(&CatalogManager::DeleteYsqlDatabaseAsync, this, ns))); } return Status::OK(); } // Get the information about an in-progress create operation. Status CatalogManager::IsCreateNamespaceDone(const IsCreateNamespaceDoneRequestPB* req, IsCreateNamespaceDoneResponsePB* resp) { auto ns_pb = req->namespace_(); // 1. Lookup the namespace and verify it exists. TRACE("Looking up keyspace"); auto ns = VERIFY_NAMESPACE_FOUND(FindNamespace(ns_pb), resp); TRACE("Locking keyspace"); auto l = ns->LockForRead(); auto metadata = l->pb; switch (metadata.state()) { // Success cases. Done and working. case SysNamespaceEntryPB::RUNNING: if (!ns->colocated()) { resp->set_done(true); } else { // Verify system table created as well, if colocated. 
        // A colocated namespace is only "done" once its parent table is also done.
        IsCreateTableDoneRequestPB table_req;
        IsCreateTableDoneResponsePB table_resp;
        const auto parent_table_id = ns->id() + kColocatedParentTableIdSuffix;
        table_req.mutable_table()->set_table_id(parent_table_id);
        auto s = IsCreateTableDone(&table_req, &table_resp);
        resp->set_done(table_resp.done());
        if (!s.ok()) {
          if (table_resp.has_error()) {
            resp->mutable_error()->Swap(table_resp.mutable_error());
          }
          return s;
        }
      }
      break;
    // These states indicate that a create completed but a subsequent remove was requested.
    case SysNamespaceEntryPB::DELETING:
    case SysNamespaceEntryPB::DELETED:
      resp->set_done(true);
      break;
    // Pending cases.  NOT DONE
    case SysNamespaceEntryPB::PREPARING:
      resp->set_done(false);
      break;
    // Failure cases.  Done, but we need to give the user an error message.
    case SysNamespaceEntryPB::FAILED:
      resp->set_done(true);
      return SetupError(resp->mutable_error(), MasterErrorPB::UNKNOWN_ERROR,
                        STATUS(InternalError, "Namespace Create Failed: not onlined."));
    default:
      Status s = STATUS_SUBSTITUTE(IllegalState, "IsCreateNamespaceDone failure: state=$0",
                                   SysNamespaceEntryPB_State_Name(metadata.state()));
      LOG(WARNING) << s.ToString();
      resp->set_done(true);
      return SetupError(resp->mutable_error(), MasterErrorPB::UNKNOWN_ERROR, s);
  }

  return Status::OK();
}

// Thin wrapper that converts DoDeleteNamespace failures into response errors.
Status CatalogManager::DeleteNamespace(const DeleteNamespaceRequestPB* req,
                                       DeleteNamespaceResponsePB* resp,
                                       rpc::RpcContext* rpc) {
  auto status = DoDeleteNamespace(req, resp, rpc);
  if (!status.ok()) {
    return SetupError(resp->mutable_error(), status);
  }
  return status;
}

// Deletes a (YCQL) namespace; YSQL requests are forked off to DeleteYsqlDatabase.
Status CatalogManager::DoDeleteNamespace(const DeleteNamespaceRequestPB* req,
                                         DeleteNamespaceResponsePB* resp,
                                         rpc::RpcContext* rpc) {
  LOG(INFO) << "Servicing DeleteNamespace request from " << RequestorString(rpc)
            << ": " << req->ShortDebugString();

  // Lookup the namespace and verify if it exists.
  TRACE("Looking up keyspace");
  auto ns = VERIFY_RESULT(FindNamespace(req->namespace_()));

  if (req->has_database_type() && req->database_type() != ns->database_type()) {
    // Could not find the right database to delete.
    return STATUS(NotFound, "Keyspace not found", ns->name(),
                  MasterError(MasterErrorPB::NAMESPACE_NOT_FOUND));
  }
  {
    // Don't allow deletion if the namespace is in a transient state.
    auto cur_state = ns->state();
    if (cur_state != SysNamespaceEntryPB::RUNNING &&
        cur_state != SysNamespaceEntryPB::FAILED) {
      if (cur_state == SysNamespaceEntryPB::DELETED) {
        return STATUS(NotFound, "Keyspace already deleted", ns->name(),
                      MasterError(MasterErrorPB::NAMESPACE_NOT_FOUND));
      } else {
        return STATUS_EC_FORMAT(
            TryAgain, MasterError(MasterErrorPB::IN_TRANSITION_CAN_RETRY),
            "Namespace deletion not allowed when State = $0",
            SysNamespaceEntryPB::State_Name(cur_state));
      }
    }
  }

  // PGSQL has a completely forked implementation because it allows non-empty namespaces on
  // delete.
  if (ns->database_type() == YQL_DATABASE_PGSQL) {
    return DeleteYsqlDatabase(req, resp, rpc);
  }

  TRACE("Locking keyspace");
  auto l = ns->LockForWrite();

  // Only empty namespace can be deleted.
  TRACE("Looking for tables in the keyspace");
  {
    SharedLock lock(mutex_);
    VLOG_WITH_FUNC(3) << "Acquired the catalog manager lock";

    for (const TableInfoMap::value_type& entry : *table_ids_map_) {
      auto ltm = entry.second->LockForRead();

      if (!ltm->started_deleting() && ltm->namespace_id() == ns->id()) {
        return STATUS_EC_FORMAT(
            InvalidArgument, MasterError(MasterErrorPB::NAMESPACE_IS_NOT_EMPTY),
            "Cannot delete keyspace which has $0: $1 [id=$2], request: $3",
            IsTable(ltm->pb) ? "table" : "index", ltm->name(), entry.second->id(),
            req->ShortDebugString());
      }
    }

    // Only empty namespace can be deleted.
TRACE("Looking for types in the keyspace"); for (const UDTypeInfoMap::value_type& entry : udtype_ids_map_) { auto ltm = entry.second->LockForRead(); if (ltm->namespace_id() == ns->id()) { return STATUS_EC_FORMAT( InvalidArgument, MasterError(MasterErrorPB::NAMESPACE_IS_NOT_EMPTY), "Cannot delete keyspace which has type: $0 [id=$1], request: $2", ltm->name(), entry.second->id(), req->ShortDebugString()); } } } // Disallow deleting namespaces with snapshot schedules. auto map = VERIFY_RESULT(MakeSnapshotSchedulesToObjectIdsMap(SysRowEntry::NAMESPACE)); for (const auto& schedule_and_objects : map) { for (const auto& id : schedule_and_objects.second) { if (id == ns->id()) { return STATUS_EC_FORMAT( InvalidArgument, MasterError(MasterErrorPB::NAMESPACE_IS_NOT_EMPTY), "Cannot delete keyspace which has schedule: $0, request: $1", schedule_and_objects.first, req->ShortDebugString()); } } } // [Delete]. Skip the DELETING->DELETED state, since no tables are present in this namespace. TRACE("Updating metadata on disk"); // Update sys-catalog. Status s = sys_catalog_->Delete(leader_ready_term(), ns); if (!s.ok()) { // The mutation will be aborted when 'l' exits the scope on early return. s = s.CloneAndPrepend("An error occurred while updating sys-catalog"); LOG(WARNING) << s; return CheckIfNoLongerLeader(s); } // Update the in-memory state. TRACE("Committing in-memory state"); l.Commit(); // Remove the namespace from all CatalogManager mappings. { LockGuard lock(mutex_); if (namespace_names_mapper_[ns->database_type()].erase(ns->name()) < 1) { LOG(WARNING) << Format("Could not remove namespace from names map, id=$1", ns->id()); } if (namespace_ids_map_.erase(ns->id()) < 1) { LOG(WARNING) << Format("Could not remove namespace from ids map, id=$1", ns->id()); } } // Delete any permissions granted on this keyspace to any role. See comment in DeleteTable() for // more details. 
  string canonical_resource = get_canonical_keyspace(req->namespace_().name());
  RETURN_NOT_OK(permissions_manager_->RemoveAllPermissionsForResource(canonical_resource,
                                                                      resp));

  LOG(INFO) << "Successfully deleted keyspace " << ns->ToString()
            << " per request from " << RequestorString(rpc);
  return Status::OK();
}

// Background deletion of a YCQL keyspace: verifies it is empty, deletes it from the
// sys-catalog, then removes it from the in-memory maps.  Failures only log — the task
// has no caller to report to.
void CatalogManager::DeleteYcqlDatabaseAsync(scoped_refptr<NamespaceInfo> database) {
  TRACE("Locking keyspace");
  auto l = database->LockForWrite();

  // Only empty namespace can be deleted.
  TRACE("Looking for tables in the keyspace");
  {
    SharedLock lock(mutex_);
    VLOG_WITH_FUNC(3) << "Acquired the catalog manager lock";

    for (const TableInfoMap::value_type& entry : *table_ids_map_) {
      auto ltm = entry.second->LockForRead();

      if (!ltm->started_deleting() && ltm->namespace_id() == database->id()) {
        LOG(WARNING) << "Cannot delete keyspace which has " << ltm->name()
                     << " with id=" << entry.second->id();
        return;
      }
    }
  }

  // Only empty namespace can be deleted.
  TRACE("Looking for types in the keyspace");
  {
    SharedLock lock(mutex_);
    VLOG_WITH_FUNC(3) << "Acquired the catalog manager lock";

    for (const UDTypeInfoMap::value_type& entry : udtype_ids_map_) {
      auto ltm = entry.second->LockForRead();

      if (ltm->namespace_id() == database->id()) {
        LOG(WARNING) << "Cannot delete keyspace which has type: " << ltm->name()
                     << " with id=" << entry.second->id();
        return;
      }
    }
  }

  // [Delete]. Skip the DELETING->DELETED state, since no tables are present in this namespace.
  TRACE("Updating metadata on disk");
  // Update sys-catalog.
  Status s = sys_catalog_->Delete(leader_ready_term(), database);
  if (!s.ok()) {
    // The mutation will be aborted when 'l' exits the scope on early return.
    s = s.CloneAndPrepend(Substitute("An error occurred while updating sys-catalog: $0",
                                     s.ToString()));
    LOG(WARNING) << s.ToString();
    return;
  }

  // Update the in-memory state.
  TRACE("Committing in-memory state");
  l.Commit();

  // Remove the namespace from all CatalogManager mappings.
{ LockGuard lock(mutex_); namespace_names_mapper_[database->database_type()].erase(database->name()); if (namespace_ids_map_.erase(database->id()) < 1) { LOG(WARNING) << Format("Could not remove namespace from maps, id=$1", database->id()); } } // Delete any permissions granted on this keyspace to any role. See comment in DeleteTable() for // more details. string canonical_resource = get_canonical_keyspace(database->name()); DeleteNamespaceResponsePB resp; s = permissions_manager_->RemoveAllPermissionsForResource(canonical_resource, &resp); if (s.ok()) { LOG(INFO) << "Successfully deleted keyspace " << database->ToString(); } else { LOG(WARNING) << "Error deleting keyspace " << database->ToString() << ": " << s; } } Status CatalogManager::DeleteYsqlDatabase(const DeleteNamespaceRequestPB* req, DeleteNamespaceResponsePB* resp, rpc::RpcContext* rpc) { // Lookup database. auto database = VERIFY_NAMESPACE_FOUND(FindNamespace(req->namespace_()), resp); // Make sure this is a YSQL database. if (database->database_type() != YQL_DATABASE_PGSQL) { // A non-YSQL namespace is found, but the rpc requests to drop a YSQL database. Status s = STATUS(NotFound, "YSQL database not found", database->name()); return SetupError(resp->mutable_error(), MasterErrorPB::NAMESPACE_NOT_FOUND, s); } // Set the Namespace to DELETING. TRACE("Locking database"); auto l = database->LockForWrite(); SysNamespaceEntryPB &metadata = database->mutable_metadata()->mutable_dirty()->pb; if (metadata.state() == SysNamespaceEntryPB::RUNNING || metadata.state() == SysNamespaceEntryPB::FAILED) { metadata.set_state(SysNamespaceEntryPB::DELETING); RETURN_NOT_OK(sys_catalog_->Upsert(leader_ready_term(), database)); TRACE("Marked keyspace for deletion in sys-catalog"); // Commit the namespace in-memory state. 
l.Commit(); } else { Status s = STATUS_SUBSTITUTE(IllegalState, "Keyspace ($0) has invalid state ($1), aborting delete", database->name(), metadata.state()); return SetupError(resp->mutable_error(), MasterErrorPB::INTERNAL_ERROR, s); } return background_tasks_thread_pool_->SubmitFunc( std::bind(&CatalogManager::DeleteYsqlDatabaseAsync, this, database)); } void CatalogManager::DeleteYsqlDatabaseAsync(scoped_refptr<NamespaceInfo> database) { TEST_PAUSE_IF_FLAG(TEST_hang_on_namespace_transition); // Lock database before removing content. TRACE("Locking database"); auto l = database->LockForWrite(); SysNamespaceEntryPB &metadata = database->mutable_metadata()->mutable_dirty()->pb; // A DELETED Namespace has finished but was tombstoned to avoid immediately reusing the same ID. // We consider a restart enough time, so we just need to remove it from the SysCatalog. if (metadata.state() == SysNamespaceEntryPB::DELETED) { Status s = sys_catalog_->Delete(leader_ready_term(), database); WARN_NOT_OK(s, "SysCatalog DeleteItem for Namespace"); if (!s.ok()) { return; } } else if (metadata.state() == SysNamespaceEntryPB::DELETING) { // Delete all tables in the database. TRACE("Delete all tables in YSQL database"); Status s = DeleteYsqlDBTables(database); WARN_NOT_OK(s, "DeleteYsqlDBTables failed"); if (!s.ok()) { // Move to FAILED so DeleteNamespace can be reissued by the user. metadata.set_state(SysNamespaceEntryPB::FAILED); l.Commit(); return; } // Once all user-facing data has been offlined, move the Namespace to DELETED state. metadata.set_state(SysNamespaceEntryPB::DELETED); s = sys_catalog_->Upsert(leader_ready_term(), database);; WARN_NOT_OK(s, "SysCatalog Update for Namespace"); if (!s.ok()) { // Move to FAILED so DeleteNamespace can be reissued by the user. 
      // Persisting the DELETED state failed: fall back to FAILED so the user can retry.
      metadata.set_state(SysNamespaceEntryPB::FAILED);
      l.Commit();
      return;
    }
    TRACE("Marked keyspace as deleted in sys-catalog");
  } else {
    // Neither DELETED nor DELETING: nothing safe to do here, bail out.
    LOG(WARNING) << "Keyspace (" << database->name() << ") has invalid state ("
                 << metadata.state() << "), aborting delete";
    return;
  }

  // Remove namespace from CatalogManager name mapping.  Will remove ID map after all Tables gone.
  {
    LockGuard lock(mutex_);
    if (namespace_names_mapper_[database->database_type()].erase(database->name()) < 1) {
      LOG(WARNING) << Format("Could not remove namespace from maps, name=$0, id=$1",
                             database->name(), database->id());
    }
  }

  // Update the in-memory state.
  TRACE("Committing in-memory state");
  l.Commit();

  // DROP completed. Return status.
  LOG(INFO) << "Successfully deleted YSQL database " << database->ToString();
}

// Deletes every table belonging to `database` (batched), including system tables that
// live in the sys-catalog tablet. Called from DeleteYsqlDatabaseAsync.
// IMPORTANT: If modifying, consider updating DeleteTable(), the singular deletion API.
Status CatalogManager::DeleteYsqlDBTables(const scoped_refptr<NamespaceInfo>& database) {
  TabletInfoPtr sys_tablet_info;
  // Each entry pairs a table with its held write lock; locks are committed later in bulk.
  vector<pair<scoped_refptr<TableInfo>, TableInfo::WriteLock>> tables;
  unordered_set<TableId> sys_table_ids;
  {
    // Lock the catalog to iterate over table_ids_map_.
    SharedLock lock(mutex_);

    sys_tablet_info = tablet_map_->find(kSysCatalogTabletId)->second;

    // Populate tables and sys_table_ids.
    for (const TableInfoMap::value_type& entry : *table_ids_map_) {
      scoped_refptr<TableInfo> table = entry.second;
      auto l = table->LockForWrite();
      // Skip tables of other namespaces and tables already being torn down.
      if (l->namespace_id() != database->id() || l->started_deleting()) {
        continue;
      }
      RSTATUS_DCHECK(
          !l->pb.is_pg_shared_table(), Corruption, "Shared table found in database");
      if (IsSystemTable(*table)) {
        sys_table_ids.insert(table->id());
      }
      // For regular (indexed) table, insert table info and lock in the front of the list. Else for
      // index table, append them to the end. We do so so that we will commit and delete the indexed
      // table first before its indexes.
if (IsTable(l->pb)) { tables.insert(tables.begin(), {table, std::move(l)}); } else { tables.push_back({table, std::move(l)}); } } } // Remove the system tables from RAFT. TRACE("Sending system table delete RPCs"); for (auto &table_id : sys_table_ids) { RETURN_NOT_OK(sys_catalog_->DeleteYsqlSystemTable(table_id)); } // Remove the system tables from the system catalog TabletInfo. RETURN_NOT_OK(RemoveTableIdsFromTabletInfo(sys_tablet_info, sys_table_ids)); // Batch remove all relevant CDC streams. Handle before we delete the tables they reference. TRACE("Deleting CDC streams on table"); vector<TableId> id_list; id_list.reserve(tables.size()); for (auto &table_and_lock : tables) { id_list.push_back(table_and_lock.first->id()); } RETURN_NOT_OK(DeleteCDCStreamsForTables(id_list)); // Set all table states to DELETING as one batch RPC call. TRACE("Sending delete table batch RPC to sys catalog"); vector<TableInfo *> tables_rpc; tables_rpc.reserve(tables.size()); for (auto &table_and_lock : tables) { tables_rpc.push_back(table_and_lock.first.get()); auto &l = table_and_lock.second; // Mark the table state as DELETING tablets. l.mutable_data()->set_state(SysTablesEntryPB::DELETING, Substitute("Started deleting at $0", LocalTimeAsString())); } // Update all the table states in raft in bulk. Status s = sys_catalog_->Upsert(leader_ready_term(), tables_rpc);; if (!s.ok()) { // The mutation will be aborted when 'l' exits the scope on early return. s = s.CloneAndPrepend(Substitute("An error occurred while updating sys tables: $0", s.ToString())); LOG(WARNING) << s.ToString(); return CheckIfNoLongerLeader(s); } for (auto &table_and_lock : tables) { auto &table = table_and_lock.first; auto &l = table_and_lock.second; // Cancel all table busywork and commit the DELETING change. l.Commit(); table->AbortTasks(); } // Send a DeleteTablet() RPC request to each tablet replica in the table. 
for (auto &table_and_lock : tables) { auto &table = table_and_lock.first; // TODO(pitr) undelete for YSQL tables RETURN_NOT_OK(DeleteTabletsAndSendRequests(table, {})); } // Invoke any background tasks and return (notably, table cleanup). background_tasks_->Wake(); return Status::OK(); } // Get the information about an in-progress delete operation. Status CatalogManager::IsDeleteNamespaceDone(const IsDeleteNamespaceDoneRequestPB* req, IsDeleteNamespaceDoneResponsePB* resp) { auto ns_pb = req->namespace_(); // Lookup the namespace and verify it exists. TRACE("Looking up keyspace"); auto ns = FindNamespace(ns_pb); if (!ns.ok()) { // Namespace no longer exists means success. LOG(INFO) << "Servicing IsDeleteNamespaceDone request for " << ns_pb.DebugString() << ": deleted (not found)"; resp->set_done(true); return Status::OK(); } TRACE("Locking keyspace"); auto l = (**ns).LockForRead(); auto& metadata = l->pb; if (metadata.state() == SysNamespaceEntryPB::DELETED) { resp->set_done(true); } else if (metadata.state() == SysNamespaceEntryPB::DELETING) { resp->set_done(false); } else { Status s = STATUS_SUBSTITUTE(IllegalState, "Servicing IsDeleteNamespaceDone request for $0: NOT deleted (state=$1)", ns_pb.DebugString(), metadata.state()); LOG(WARNING) << s.ToString(); // Done != Successful. We just want to let the user know the delete has finished processing. 
resp->set_done(true); return SetupError(resp->mutable_error(), MasterErrorPB::INTERNAL_ERROR, s); } return Status::OK(); } Status CatalogManager::AlterNamespace(const AlterNamespaceRequestPB* req, AlterNamespaceResponsePB* resp, rpc::RpcContext* rpc) { LOG(INFO) << "Servicing AlterNamespace request from " << RequestorString(rpc) << ": " << req->ShortDebugString(); auto database = VERIFY_NAMESPACE_FOUND(FindNamespace(req->namespace_()), resp); if (req->namespace_().has_database_type() && database->database_type() != req->namespace_().database_type()) { Status s = STATUS(NotFound, "Database not found", database->name()); return SetupError(resp->mutable_error(), MasterErrorPB::NAMESPACE_NOT_FOUND, s); } TRACE("Locking database"); auto l = database->LockForWrite(); // Don't allow an alter if the namespace isn't running. if (l->pb.state() != SysNamespaceEntryPB::RUNNING) { Status s = STATUS_SUBSTITUTE(TryAgain, "Namespace not running. State = $0", SysNamespaceEntryPB::State_Name(l->pb.state())); return SetupError(resp->mutable_error(), NamespaceMasterError(l->pb.state()), s); } const string old_name = l->pb.name(); if (req->has_new_name() && req->new_name() != old_name) { const string new_name = req->new_name(); // Verify that the new name does not exist. NamespaceIdentifierPB ns_identifier; ns_identifier.set_name(new_name); if (req->namespace_().has_database_type()) { ns_identifier.set_database_type(req->namespace_().database_type()); } // TODO: This check will only work for YSQL once we add support for YSQL namespaces in // namespace_name_map (#1476). LockGuard lock(mutex_); TRACE("Acquired catalog manager lock"); auto ns = FindNamespaceUnlocked(ns_identifier); if (ns.ok() && req->namespace_().has_database_type() && (**ns).database_type() == req->namespace_().database_type()) { Status s = STATUS_SUBSTITUTE(AlreadyPresent, "Keyspace '$0' already exists", (**ns).name()); LOG(WARNING) << "Found keyspace: " << (**ns).id() << ". 
Failed altering keyspace with error: " << s << " Request:\n" << req->DebugString(); return SetupError(resp->mutable_error(), MasterErrorPB::OBJECT_ALREADY_PRESENT, s); } namespace_names_mapper_[req->namespace_().database_type()][new_name] = database; namespace_names_mapper_[req->namespace_().database_type()].erase(old_name); l.mutable_data()->pb.set_name(new_name); } RETURN_NOT_OK(sys_catalog_->Upsert(leader_ready_term(), database)); TRACE("Committing in-memory state"); l.Commit(); LOG(INFO) << "Successfully altered keyspace " << req->namespace_().name() << " per request from " << RequestorString(rpc); return Status::OK(); } Status CatalogManager::ListNamespaces(const ListNamespacesRequestPB* req, ListNamespacesResponsePB* resp) { NamespaceInfoMap namespace_ids_copy; { SharedLock lock(mutex_); namespace_ids_copy = namespace_ids_map_; } for (const auto& entry : namespace_ids_copy) { const auto& namespace_info = *entry.second; // If the request asks for namespaces for a specific database type, filter by the type. if (req->has_database_type() && namespace_info.database_type() != req->database_type()) { continue; } // Only return RUNNING namespaces. if (namespace_info.state() != SysNamespaceEntryPB::RUNNING) { continue; } NamespaceIdentifierPB *ns = resp->add_namespaces(); ns->set_id(namespace_info.id()); ns->set_name(namespace_info.name()); ns->set_database_type(namespace_info.database_type()); } return Status::OK(); } Status CatalogManager::GetNamespaceInfo(const GetNamespaceInfoRequestPB* req, GetNamespaceInfoResponsePB* resp, rpc::RpcContext* rpc) { LOG(INFO) << __func__ << " from " << RequestorString(rpc) << ": " << req->ShortDebugString(); // Look up the namespace and verify if it exists. 
TRACE("Looking up namespace"); auto ns = VERIFY_NAMESPACE_FOUND(FindNamespace(req->namespace_()), resp); resp->mutable_namespace_()->set_id(ns->id()); resp->mutable_namespace_()->set_name(ns->name()); resp->mutable_namespace_()->set_database_type(ns->database_type()); resp->set_colocated(ns->colocated()); return Status::OK(); } Status CatalogManager::RedisConfigSet( const RedisConfigSetRequestPB* req, RedisConfigSetResponsePB* resp, rpc::RpcContext* rpc) { DCHECK(req->has_keyword()); const auto& key = req->keyword(); SysRedisConfigEntryPB config_entry; config_entry.set_key(key); *config_entry.mutable_args() = req->args(); bool created = false; TRACE("Acquired catalog manager lock"); LockGuard lock(mutex_); scoped_refptr<RedisConfigInfo> cfg = FindPtrOrNull(redis_config_map_, req->keyword()); if (cfg == nullptr) { created = true; cfg = new RedisConfigInfo(key); redis_config_map_[key] = cfg; } auto wl = cfg->LockForWrite(); wl.mutable_data()->pb = std::move(config_entry); if (created) { CHECK_OK(sys_catalog_->Upsert(leader_ready_term(), cfg)); } else { CHECK_OK(sys_catalog_->Upsert(leader_ready_term(), cfg)); } wl.Commit(); return Status::OK(); } Status CatalogManager::RedisConfigGet( const RedisConfigGetRequestPB* req, RedisConfigGetResponsePB* resp, rpc::RpcContext* rpc) { DCHECK(req->has_keyword()); resp->set_keyword(req->keyword()); TRACE("Acquired catalog manager lock"); SharedLock lock(mutex_); scoped_refptr<RedisConfigInfo> cfg = FindPtrOrNull(redis_config_map_, req->keyword()); if (cfg == nullptr) { Status s = STATUS_SUBSTITUTE(NotFound, "Redis config for $0 does not exists", req->keyword()); return SetupError(resp->mutable_error(), MasterErrorPB::REDIS_CONFIG_NOT_FOUND, s); } auto rci = cfg->LockForRead(); resp->mutable_args()->CopyFrom(rci->pb.args()); return Status::OK(); } Status CatalogManager::CreateUDType(const CreateUDTypeRequestPB* req, CreateUDTypeResponsePB* resp, rpc::RpcContext* rpc) { LOG(INFO) << "CreateUDType from " << RequestorString(rpc) << 
": " << req->DebugString(); Status s; scoped_refptr<UDTypeInfo> tp; scoped_refptr<NamespaceInfo> ns; // Lookup the namespace and verify if it exists. if (req->has_namespace_()) { TRACE("Looking up namespace"); ns = VERIFY_NAMESPACE_FOUND(FindNamespace(req->namespace_()), resp); if (ns->database_type() != YQLDatabase::YQL_DATABASE_CQL) { Status s = STATUS(NotFound, "Namespace not found"); return SetupError(resp->mutable_error(), MasterErrorPB::NAMESPACE_NOT_FOUND, s); } } // Get all the referenced types (if any). std::vector<std::string> referenced_udts; for (const QLTypePB& field_type : req->field_types()) { QLType::GetUserDefinedTypeIds(field_type, /* transitive = */ true, &referenced_udts); } { TRACE("Acquired catalog manager lock"); LockGuard lock(mutex_); // Verify that the type does not exist. tp = FindPtrOrNull(udtype_names_map_, std::make_pair(ns->id(), req->name())); if (tp != nullptr) { s = STATUS_SUBSTITUTE(AlreadyPresent, "Type '$0.$1' already exists", ns->name(), req->name()); LOG(WARNING) << "Found type: " << tp->id() << ". Failed creating type with error: " << s.ToString() << " Request:\n" << req->DebugString(); return SetupError(resp->mutable_error(), MasterErrorPB::TYPE_ALREADY_PRESENT, s); } // Verify that all referenced types actually exist. for (const auto& udt_id : referenced_udts) { if (FindPtrOrNull(udtype_ids_map_, udt_id) == nullptr) { // This may be caused by a stale cache (e.g. referenced type name resolves to an old, // deleted type). Return InvalidArgument so query layer will clear cache and retry. s = STATUS_SUBSTITUTE(InvalidArgument, "Type id '$0' referenced by type '$1' does not exist", udt_id, req->name()); return SetupError(resp->mutable_error(), MasterErrorPB::INVALID_REQUEST, s); } } // Construct the new type (generate fresh name and set fields). 
UDTypeId new_id = GenerateIdUnlocked(SysRowEntry::UDTYPE); tp = new UDTypeInfo(new_id); tp->mutable_metadata()->StartMutation(); SysUDTypeEntryPB *metadata = &tp->mutable_metadata()->mutable_dirty()->pb; metadata->set_name(req->name()); metadata->set_namespace_id(ns->id()); for (const string& field_name : req->field_names()) { metadata->add_field_names(field_name); } for (const QLTypePB& field_type : req->field_types()) { metadata->add_field_types()->CopyFrom(field_type); } // Add the type to the in-memory maps. udtype_ids_map_[tp->id()] = tp; udtype_names_map_[std::make_pair(ns->id(), req->name())] = tp; resp->set_id(tp->id()); } TRACE("Inserted new user-defined type info into CatalogManager maps"); // Update the on-disk system catalog. s = sys_catalog_->Upsert(leader_ready_term(), tp);; if (!s.ok()) { s = s.CloneAndPrepend(Substitute( "An error occurred while inserting user-defined type to sys-catalog: $0", s.ToString())); LOG(WARNING) << s.ToString(); return CheckIfNoLongerLeaderAndSetupError(s, resp); } TRACE("Wrote user-defined type to sys-catalog"); // Commit the in-memory state. tp->mutable_metadata()->CommitMutation(); LOG(INFO) << "Created user-defined type " << tp->ToString(); return Status::OK(); } Status CatalogManager::DeleteUDType(const DeleteUDTypeRequestPB* req, DeleteUDTypeResponsePB* resp, rpc::RpcContext* rpc) { LOG(INFO) << "Servicing DeleteUDType request from " << RequestorString(rpc) << ": " << req->ShortDebugString(); scoped_refptr<UDTypeInfo> tp; scoped_refptr<NamespaceInfo> ns; if (!req->has_type()) { Status s = STATUS(InvalidArgument, "No type given", req->DebugString()); return SetupError(resp->mutable_error(), MasterErrorPB::NAMESPACE_NOT_FOUND, s); } // Validate namespace. if (req->type().has_namespace_()) { // Lookup the namespace and verify if it exists. 
TRACE("Looking up namespace"); ns = VERIFY_NAMESPACE_FOUND(FindNamespace(req->type().namespace_()), resp); } { LockGuard lock(mutex_); TRACE("Acquired catalog manager lock"); if (req->type().has_type_id()) { tp = FindPtrOrNull(udtype_ids_map_, req->type().type_id()); } else if (req->type().has_type_name()) { tp = FindPtrOrNull(udtype_names_map_, {ns->id(), req->type().type_name()}); } if (tp == nullptr) { Status s = STATUS(NotFound, "The type does not exist", req->DebugString()); return SetupError(resp->mutable_error(), MasterErrorPB::TYPE_NOT_FOUND, s); } // Checking if any table uses this type. // TODO: this could be more efficient. for (const TableInfoMap::value_type& entry : *table_ids_map_) { auto ltm = entry.second->LockForRead(); if (!ltm->started_deleting()) { for (const auto &col : ltm->schema().columns()) { if (col.type().main() == DataType::USER_DEFINED_TYPE && col.type().udtype_info().id() == tp->id()) { Status s = STATUS(QLError, Substitute("Cannot delete type '$0.$1'. It is used in column $2 of table $3", ns->name(), tp->name(), col.name(), ltm->name())); return SetupError(resp->mutable_error(), MasterErrorPB::INVALID_REQUEST, s); } } } } // Checking if any other type uses this type (i.e. in the case of nested types). // TODO: this could be more efficient. for (const UDTypeInfoMap::value_type& entry : udtype_ids_map_) { auto ltm = entry.second->LockForRead(); for (int i = 0; i < ltm->field_types_size(); i++) { // Only need to check direct (non-transitive) type dependencies here. // This also means we report more precise errors for in-use types. if (QLType::DoesUserDefinedTypeIdExist(ltm->field_types(i), false /* transitive */, tp->id())) { Status s = STATUS(QLError, Substitute("Cannot delete type '$0.$1'. 
It is used in field $2 of type '$3'", ns->name(), tp->name(), ltm->field_names(i), ltm->name())); return SetupError(resp->mutable_error(), MasterErrorPB::INVALID_REQUEST, s); } } } } auto l = tp->LockForWrite(); Status s = sys_catalog_->Delete(leader_ready_term(), tp); if (!s.ok()) { // The mutation will be aborted when 'l' exits the scope on early return. s = s.CloneAndPrepend(Substitute("An error occurred while updating sys-catalog: $0", s.ToString())); LOG(WARNING) << s.ToString(); return CheckIfNoLongerLeaderAndSetupError(s, resp); } // Remove it from the maps. { TRACE("Removing from maps"); LockGuard lock(mutex_); if (udtype_ids_map_.erase(tp->id()) < 1) { PANIC_RPC(rpc, "Could not remove user defined type from map, name=" + l->name()); } if (udtype_names_map_.erase({ns->id(), tp->name()}) < 1) { PANIC_RPC(rpc, "Could not remove user defined type from map, name=" + l->name()); } } // Update the in-memory state. TRACE("Committing in-memory state"); l.Commit(); LOG(INFO) << "Successfully deleted user-defined type " << tp->ToString() << " per request from " << RequestorString(rpc); return Status::OK(); } Status CatalogManager::GetUDTypeInfo(const GetUDTypeInfoRequestPB* req, GetUDTypeInfoResponsePB* resp, rpc::RpcContext* rpc) { LOG(INFO) << "GetUDTypeInfo from " << RequestorString(rpc) << ": " << req->DebugString(); Status s; scoped_refptr<UDTypeInfo> tp; scoped_refptr<NamespaceInfo> ns; if (!req->has_type()) { s = STATUS(InvalidArgument, "Cannot get type, no type identifier given", req->DebugString()); return SetupError(resp->mutable_error(), MasterErrorPB::TYPE_NOT_FOUND, s); } if (req->type().has_type_id()) { tp = FindPtrOrNull(udtype_ids_map_, req->type().type_id()); } else if (req->type().has_type_name() && req->type().has_namespace_()) { // Lookup the type and verify if it exists. 
TRACE("Looking up namespace"); ns = VERIFY_NAMESPACE_FOUND(FindNamespace(req->type().namespace_()), resp); tp = FindPtrOrNull(udtype_names_map_, std::make_pair(ns->id(), req->type().type_name())); } if (tp == nullptr) { s = STATUS(InvalidArgument, "Couldn't find type", req->DebugString()); return SetupError(resp->mutable_error(), MasterErrorPB::TYPE_NOT_FOUND, s); } { auto type_lock = tp->LockForRead(); UDTypeInfoPB* type_info = resp->mutable_udtype(); type_info->set_name(tp->name()); type_info->set_id(tp->id()); type_info->mutable_namespace_()->set_id(type_lock->namespace_id()); for (int i = 0; i < type_lock->field_names_size(); i++) { type_info->add_field_names(type_lock->field_names(i)); } for (int i = 0; i < type_lock->field_types_size(); i++) { type_info->add_field_types()->CopyFrom(type_lock->field_types(i)); } LOG(INFO) << "Retrieved user-defined type " << tp->ToString(); } return Status::OK(); } Status CatalogManager::ListUDTypes(const ListUDTypesRequestPB* req, ListUDTypesResponsePB* resp) { SharedLock lock(mutex_); // Lookup the namespace and verify that it exists. auto ns = VERIFY_NAMESPACE_FOUND(FindNamespaceUnlocked(req->namespace_()), resp); for (const UDTypeInfoByNameMap::value_type& entry : udtype_names_map_) { auto ltm = entry.second->LockForRead(); // key is a pair <namespace_id, type_name>. if (!ns->id().empty() && ns->id() != entry.first.first) { continue; // Skip types from other namespaces. 
} UDTypeInfoPB* udtype = resp->add_udtypes(); udtype->set_id(entry.second->id()); udtype->set_name(ltm->name()); for (size_t i = 0; i <= ltm->field_names_size(); i++) { udtype->add_field_names(ltm->field_names(i)); } for (size_t i = 0; i <= ltm->field_types_size(); i++) { udtype->add_field_types()->CopyFrom(ltm->field_types(i)); } if (CHECK_NOTNULL(ns.get())) { auto l = ns->LockForRead(); udtype->mutable_namespace_()->set_id(ns->id()); udtype->mutable_namespace_()->set_name(ns->name()); } } return Status::OK(); } // For non-enterprise builds, this is a no-op. Status CatalogManager::DeleteCDCStreamsForTable(const TableId& table) { return Status::OK(); } Status CatalogManager::DeleteCDCStreamsForTables(const vector<TableId>& table_ids) { return Status::OK(); } bool CatalogManager::CDCStreamExistsUnlocked(const CDCStreamId& stream_id) { return false; } Result<uint64_t> CatalogManager::IncrementYsqlCatalogVersion() { auto l = CHECK_NOTNULL(ysql_catalog_config_.get())->LockForWrite(); uint64_t new_version = l->pb.ysql_catalog_config().version() + 1; l.mutable_data()->pb.mutable_ysql_catalog_config()->set_version(new_version); // Write to sys_catalog and in memory. 
  RETURN_NOT_OK(sys_catalog_->Upsert(leader_ready_term(), ysql_catalog_config_));
  l.Commit();
  return new_version;
}

// Records the outcome of initdb in the YSQL catalog config: sets initdb_done and
// either stores or clears the error string, then persists under `term`.
Status CatalogManager::InitDbFinished(Status initdb_status, int64_t term) {
  if (initdb_status.ok()) {
    LOG(INFO) << "initdb completed successfully";
  } else {
    LOG(ERROR) << "initdb failed: " << initdb_status;
  }
  auto l = CHECK_NOTNULL(ysql_catalog_config_.get())->LockForWrite();
  auto* mutable_ysql_catalog_config = l.mutable_data()->pb.mutable_ysql_catalog_config();
  mutable_ysql_catalog_config->set_initdb_done(true);
  if (!initdb_status.ok()) {
    mutable_ysql_catalog_config->set_initdb_error(initdb_status.ToString());
  } else {
    // A successful rerun clears any error from a previous failed attempt.
    mutable_ysql_catalog_config->clear_initdb_error();
  }
  RETURN_NOT_OK(sys_catalog_->Upsert(term, ysql_catalog_config_));
  l.Commit();
  return Status::OK();
}

// Reports whether initdb has finished, plus any recorded initdb error.
CHECKED_STATUS CatalogManager::IsInitDbDone(
    const IsInitDbDoneRequestPB* req, IsInitDbDoneResponsePB* resp) {
  auto l = CHECK_NOTNULL(ysql_catalog_config_.get())->LockForRead();
  const auto& ysql_catalog_config = l->pb.ysql_catalog_config();
  resp->set_pg_proc_exists(pg_proc_exists_.load(std::memory_order_acquire));
  resp->set_done(ysql_catalog_config.initdb_done());
  if (ysql_catalog_config.has_initdb_error() &&
      !ysql_catalog_config.initdb_error().empty()) {
    resp->set_initdb_error(ysql_catalog_config.initdb_error());
  }
  return Status::OK();
}

// Returns the YSQL catalog version. Prefers the table-based version
// (pg_yb_catalog_version) when that table exists; otherwise falls back to the
// protobuf-based config version.
Status CatalogManager::GetYsqlCatalogVersion(uint64_t* catalog_version,
                                             uint64_t* last_breaking_version) {
  auto table_info = GetTableInfo(kPgYbCatalogVersionTableId);
  if (table_info != nullptr) {
    return sys_catalog_->ReadYsqlCatalogVersion(kPgYbCatalogVersionTableId,
                                                catalog_version,
                                                last_breaking_version);
  }
  auto l = ysql_catalog_config_->LockForRead();
  // last_breaking_version is the last version (change) that invalidated ongoing transactions.
  // If using the old (protobuf-based) version method, we do not have any information about
  // breaking changes so assuming every change is a breaking change.
if (catalog_version) { *catalog_version = l->pb.ysql_catalog_config().version(); } if (last_breaking_version) { *last_breaking_version = l->pb.ysql_catalog_config().version(); } return Status::OK(); } Status CatalogManager::RegisterTsFromRaftConfig(const consensus::RaftPeerPB& peer) { NodeInstancePB instance_pb; instance_pb.set_permanent_uuid(peer.permanent_uuid()); instance_pb.set_instance_seqno(0); TSRegistrationPB registration_pb; auto* common = registration_pb.mutable_common(); *common->mutable_private_rpc_addresses() = peer.last_known_private_addr(); *common->mutable_broadcast_addresses() = peer.last_known_broadcast_addr(); *common->mutable_cloud_info() = peer.cloud_info(); // Todo(Rahul) : May need to be changed when we implement table level overrides. { auto l = cluster_config_->LockForRead(); // If the config has no replication info, use empty string for the placement uuid, otherwise // calculate it from the reported peer. auto placement_uuid = l->pb.has_replication_info() ? VERIFY_RESULT(CatalogManagerUtil::GetPlacementUuidFromRaftPeer( l->pb.replication_info(), peer)) : ""; common->set_placement_uuid(placement_uuid); } return master_->ts_manager()->RegisterTS(instance_pb, registration_pb, master_->MakeCloudInfoPB(), &master_->proxy_cache(), RegisteredThroughHeartbeat::kFalse); } void CatalogManager::ReconcileTabletReplicasInLocalMemoryWithReport( const scoped_refptr<TabletInfo>& tablet, const std::string& sender_uuid, const ConsensusStatePB& consensus_state, const ReportedTabletPB& report) { auto replica_locations = std::make_shared<TabletInfo::ReplicaMap>(); auto prev_rl = tablet->GetReplicaLocations(); for (const consensus::RaftPeerPB& peer : consensus_state.config().peers()) { shared_ptr<TSDescriptor> ts_desc; if (!peer.has_permanent_uuid()) { LOG_WITH_PREFIX(WARNING) << "Missing UUID for peer" << peer.ShortDebugString(); continue; } if (!master_->ts_manager()->LookupTSByUUID(peer.permanent_uuid(), &ts_desc)) { if 
(!GetAtomicFlag(&FLAGS_enable_register_ts_from_raft)) { LOG_WITH_PREFIX(WARNING) << "Tablet server has never reported in. " << "Not including in replica locations map yet. Peer: " << peer.ShortDebugString() << "; Tablet: " << tablet->ToString(); continue; } LOG_WITH_PREFIX(INFO) << "Tablet server has never reported in. Registering the ts using " << "the raft config. Peer: " << peer.ShortDebugString() << "; Tablet: " << tablet->ToString(); Status s = RegisterTsFromRaftConfig(peer); if (!s.ok()) { LOG_WITH_PREFIX(WARNING) << "Could not register ts from raft config: " << s << " Skip updating the replica map."; continue; } // Guaranteed to find the ts since we just registered. master_->ts_manager()->LookupTSByUUID(peer.permanent_uuid(), &ts_desc); if (!ts_desc.get()) { LOG_WITH_PREFIX(WARNING) << "Could not find ts with uuid " << peer.permanent_uuid() << " after registering from raft config. Skip updating the replica" << " map."; continue; } } // Do not update replicas in the NOT_STARTED or BOOTSTRAPPING state (unless they are stale). bool use_existing = false; const TabletReplica* existing_replica = nullptr; auto it = prev_rl->find(ts_desc->permanent_uuid()); if (it != prev_rl->end()) { existing_replica = &it->second; } if (existing_replica && peer.permanent_uuid() != sender_uuid) { // IsStarting returns true if state == NOT_STARTED or state == BOOTSTRAPPING. 
use_existing = existing_replica->IsStarting() && !existing_replica->IsStale(); } if (use_existing) { InsertOrDie(replica_locations.get(), existing_replica->ts_desc->permanent_uuid(), *existing_replica); } else { TabletReplica replica; CreateNewReplicaForLocalMemory(ts_desc.get(), &consensus_state, report, &replica); auto result = replica_locations.get()->insert({replica.ts_desc->permanent_uuid(), replica}); LOG_IF(FATAL, !result.second) << "duplicate uuid: " << replica.ts_desc->permanent_uuid(); if (existing_replica) { result.first->second.UpdateDriveInfo(existing_replica->drive_info); } } } // Update the local tablet replica set. This deviates from persistent state during bootstrapping. tablet->SetReplicaLocations(replica_locations); tablet_locations_version_.fetch_add(1, std::memory_order_acq_rel); } void CatalogManager::UpdateTabletReplicaInLocalMemory(TSDescriptor* ts_desc, const ConsensusStatePB* consensus_state, const ReportedTabletPB& report, const scoped_refptr<TabletInfo>& tablet) { TabletReplica replica; CreateNewReplicaForLocalMemory(ts_desc, consensus_state, report, &replica); tablet->UpdateReplicaLocations(replica); tablet_locations_version_.fetch_add(1, std::memory_order_acq_rel); } void CatalogManager::CreateNewReplicaForLocalMemory(TSDescriptor* ts_desc, const ConsensusStatePB* consensus_state, const ReportedTabletPB& report, TabletReplica* new_replica) { // Tablets in state NOT_STARTED or BOOTSTRAPPING don't have a consensus. 
if (consensus_state == nullptr) { new_replica->role = RaftPeerPB::NON_PARTICIPANT; new_replica->member_type = RaftPeerPB::UNKNOWN_MEMBER_TYPE; } else { CHECK(consensus_state != nullptr) << "No cstate: " << ts_desc->permanent_uuid() << " - " << report.state(); new_replica->role = GetConsensusRole(ts_desc->permanent_uuid(), *consensus_state); new_replica->member_type = GetConsensusMemberType(ts_desc->permanent_uuid(), *consensus_state); } if (report.has_should_disable_lb_move()) { new_replica->should_disable_lb_move = report.should_disable_lb_move(); } if (report.has_fs_data_dir()) { new_replica->fs_data_dir = report.fs_data_dir(); } new_replica->state = report.state(); new_replica->ts_desc = ts_desc; if (!ts_desc->registered_through_heartbeat()) { new_replica->time_updated = MonoTime::Now() - ts_desc->TimeSinceHeartbeat(); } } Status CatalogManager::GetTabletPeer(const TabletId& tablet_id, std::shared_ptr<TabletPeer>* ret_tablet_peer) const { // Note: CatalogManager has only one table, 'sys_catalog', with only // one tablet. if (PREDICT_FALSE(!IsInitialized())) { // Master puts up the consensus service first and then initiates catalog manager's creation // asynchronously. So this case is possible, but harmless. The RPC will simply be retried. // Previously, because we weren't checking for this condition, we would fatal down stream. 
const string& reason = "CatalogManager is not yet initialized";
    YB_LOG_EVERY_N(WARNING, 1000) << reason;
    return STATUS(ServiceUnavailable, reason);
  }

  CHECK(sys_catalog_) << "sys_catalog_ must be initialized!";

  if (master_->opts().IsShellMode()) {
    return STATUS_SUBSTITUTE(NotFound,
        "In shell mode: no tablet_id $0 exists in CatalogManager.", tablet_id);
  }

  // Only hand out the peer when the requested id matches the sys catalog tablet and the peer
  // is actually running.
  if (sys_catalog_->tablet_id() == tablet_id &&
      sys_catalog_->tablet_peer().get() != nullptr &&
      sys_catalog_->tablet_peer()->CheckRunning().ok()) {
    *ret_tablet_peer = tablet_peer();
  } else {
    return STATUS_SUBSTITUTE(NotFound,
        "no SysTable in the RUNNING state exists with tablet_id $0 in CatalogManager", tablet_id);
  }
  return Status::OK();
}

// Returns this master's node instance identifier.
const NodeInstancePB& CatalogManager::NodeInstance() const {
  return master_->instance_pb();
}

// Fills 'reg' with this master's RPC-only server registration.
Status CatalogManager::GetRegistration(ServerRegistrationPB* reg) const {
  return master_->GetRegistration(reg, server::RpcOnly::kTrue);
}

// Rebuilds the in-memory master addresses and flushes new consensus metadata from the current
// Raft config. Only valid while the process is in shell mode.
Status CatalogManager::UpdateMastersListInMemoryAndDisk() {
  DCHECK(master_->opts().IsShellMode());

  if (!master_->opts().IsShellMode()) {
    return STATUS(IllegalState, "Cannot update master's info when process is not in shell mode.");
  }

  consensus::ConsensusStatePB consensus_state;
  RETURN_NOT_OK(GetCurrentConfig(&consensus_state));

  if (!consensus_state.has_config()) {
    return STATUS(NotFound, "No Raft config found.");
  }

  RETURN_NOT_OK(sys_catalog_->ConvertConfigToMasterAddresses(consensus_state.config()));
  RETURN_NOT_OK(sys_catalog_->CreateAndFlushConsensusMeta(master_->fs_manager(),
                                                          consensus_state.config(),
                                                          consensus_state.current_term()));

  return Status::OK();
}

// Starts the catalog manager background machinery: the tablespace-info refresh task, the
// CatalogManagerBgTasks thread, and the YQL system partitions rebuild job.
Status CatalogManager::EnableBgTasks() {
  LockGuard lock(mutex_);
  // Initialize refresh_ysql_tablespace_info_task_. This will be used to
  // manage the background task that refreshes tablespace info. This task
  // will be started by the CatalogManagerBgTasks below.
refresh_ysql_tablespace_info_task_.Bind(&master_->messenger()->scheduler());

  background_tasks_.reset(new CatalogManagerBgTasks(this));
  RETURN_NOT_OK_PREPEND(background_tasks_->Init(),
                        "Failed to initialize catalog manager background tasks");

  // Add bg thread to rebuild yql system partitions.
  refresh_yql_partitions_task_.Bind(&master_->messenger()->scheduler());

  RETURN_NOT_OK(background_tasks_thread_pool_->SubmitFunc(
      [this]() { RebuildYQLSystemPartitions(); }));

  return Status::OK();
}

// Remote-bootstraps this master's sys catalog tablet from the given peer. Only valid while in
// shell mode; at most one remote bootstrap may run at a time.
Status CatalogManager::StartRemoteBootstrap(const StartRemoteBootstrapRequestPB& req) {
  const TabletId& tablet_id = req.tablet_id();

  // try_to_lock makes a concurrent bootstrap attempt fail fast instead of blocking.
  std::unique_lock<std::mutex> l(remote_bootstrap_mtx_, std::try_to_lock);
  if (!l.owns_lock()) {
    return STATUS_SUBSTITUTE(AlreadyPresent,
        "Remote bootstrap of tablet $0 already in progress", tablet_id);
  }

  if (!master_->opts().IsShellMode()) {
    return STATUS(IllegalState, "Cannot bootstrap a master which is not in shell mode.");
  }

  LOG(INFO) << "Starting remote bootstrap: " << req.ShortDebugString();

  HostPort bootstrap_peer_addr = HostPortFromPB(DesiredHostPort(
      req.source_broadcast_addr(), req.source_private_addr(), req.source_cloud_info(),
      master_->MakeCloudInfoPB()));

  const string& bootstrap_peer_uuid = req.bootstrap_peer_uuid();
  int64_t leader_term = req.caller_term();

  std::shared_ptr<TabletPeer> old_tablet_peer;
  RaftGroupMetadataPtr meta;
  bool replacing_tablet = false;

  if (tablet_exists_) {
    old_tablet_peer = tablet_peer();
    // Nothing to recover if the remote bootstrap client start failed the last time.
    if (old_tablet_peer) {
      meta = old_tablet_peer->tablet_metadata();
      replacing_tablet = true;
    }
  }

  if (replacing_tablet) {
    // Make sure the existing tablet peer is shut down and tombstoned.
RETURN_NOT_OK(tserver::HandleReplacingStaleTablet(meta,
                                                      old_tablet_peer,
                                                      tablet_id,
                                                      master_->fs_manager()->uuid(),
                                                      leader_term));
  }

  LOG_WITH_PREFIX(INFO) << " Initiating remote bootstrap from peer " << bootstrap_peer_uuid
                        << " (" << bootstrap_peer_addr.ToString() << ").";

  auto rb_client = std::make_unique<tserver::RemoteBootstrapClient>(
      tablet_id, master_->fs_manager());

  // Download and persist the remote superblock in TABLET_DATA_COPYING state.
  if (replacing_tablet) {
    RETURN_NOT_OK(rb_client->SetTabletToReplace(meta, leader_term));
  }
  RETURN_NOT_OK(rb_client->Start(
      bootstrap_peer_uuid, &master_->proxy_cache(), bootstrap_peer_addr, &meta));

  // This SetupTabletPeer is needed by rb_client to perform the remote bootstrap/fetch.
  // And the SetupTablet below to perform "local bootstrap" cannot be done until the remote fetch
  // has succeeded. So keeping them seperate for now.
  sys_catalog_->SetupTabletPeer(meta);

  // Test-only hook: optionally delay here to widen the TABLET_DATA_COPYING window.
  if (PREDICT_FALSE(FLAGS_TEST_inject_latency_during_remote_bootstrap_secs)) {
    LOG(INFO) << "Injecting " << FLAGS_TEST_inject_latency_during_remote_bootstrap_secs
              << " seconds of latency for test";
    SleepFor(MonoDelta::FromSeconds(FLAGS_TEST_inject_latency_during_remote_bootstrap_secs));
  }

  // From this point onward, the superblock is persisted in TABLET_DATA_COPYING
  // state, and we need to tombstone the tablet if additional steps prior to
  // getting to a TABLET_DATA_READY state fail.
  tablet_exists_ = true;

  // Download all of the remote files.
  TOMBSTONE_NOT_OK(rb_client->FetchAll(tablet_peer()->status_listener()),
                   meta,
                   master_->fs_manager()->uuid(),
                   Substitute("Remote bootstrap: Unable to fetch data from remote peer $0 ($1)",
                              bootstrap_peer_uuid, bootstrap_peer_addr.ToString()),
                   nullptr);

  // Write out the last files to make the new replica visible and update the
  // TabletDataState in the superblock to TABLET_DATA_READY.
// Finish() will call EndRemoteSession() and wait for the leader to successfully submit a
  // ChangeConfig request (to change this master's role from PRE_VOTER or PRE_OBSERVER to VOTER or
  // OBSERVER respectively). If the RPC times out, we will ignore the error (since the leader could
  // have successfully submitted the ChangeConfig request and failed to respond before in time)
  // and check the committed config until we find that this master's role has changed, or until we
  // time out which will cause us to tombstone the tablet.
  TOMBSTONE_NOT_OK(rb_client->Finish(),
                   meta,
                   master_->fs_manager()->uuid(),
                   "Remote bootstrap: Failed calling Finish()",
                   nullptr);

  // Synchronous tablet open for "local bootstrap".
  RETURN_NOT_OK(tserver::ShutdownAndTombstoneTabletPeerNotOk(
      sys_catalog_->OpenTablet(meta), sys_catalog_->tablet_peer(), meta,
      master_->fs_manager()->uuid(), "Remote bootstrap: Failed opening sys catalog"));

  // Set up the in-memory master list and also flush the cmeta.
  RETURN_NOT_OK(UpdateMastersListInMemoryAndDisk());

  master_->SetShellMode(false);

  // Call VerifyChangeRoleSucceeded only after we have set shell mode to false. Otherwise,
  // CatalogManager::GetTabletPeer will always return an error, and the consensus will never get
  // updated.
  auto status = rb_client->VerifyChangeRoleSucceeded(
      sys_catalog_->tablet_peer()->shared_consensus());

  if (!status.ok()) {
    // Best effort: a failed verification is logged but does not fail the bootstrap.
    LOG_WITH_PREFIX(WARNING) << "Remote bootstrap finished. "
                             << "Failed calling VerifyChangeRoleSucceeded: "
                             << status.ToString();
  } else {
    LOG_WITH_PREFIX(INFO) << "Remote bootstrap finished successfully";
  }

  LOG(INFO) << "Master completed remote bootstrap and is out of shell mode.";

  RETURN_NOT_OK(EnableBgTasks());

  return Status::OK();
}

// Sends AsyncAlterTable tasks for every tablet of 'table'. For YSQL ADD/DROP COLUMN alters that
// carry transaction metadata, that metadata is first persisted into the sys catalog entry.
CHECKED_STATUS CatalogManager::SendAlterTableRequest(const scoped_refptr<TableInfo>& table,
                                                     const AlterTableRequestPB* req) {
  vector<scoped_refptr<TabletInfo>> tablets;
  table->GetAllTablets(&tablets);

  bool is_ysql_table_with_transaction_metadata =
      table->GetTableType() == TableType::PGSQL_TABLE_TYPE &&
      req != nullptr &&
      req->has_transaction() &&
      req->transaction().has_transaction_id();

  bool alter_table_has_add_or_drop_column_step = false;
  if (req && (req->alter_schema_steps_size() || req->has_alter_properties())) {
    for (const AlterTableRequestPB::Step& step : req->alter_schema_steps()) {
      if (step.type() == AlterTableRequestPB::ADD_COLUMN ||
          step.type() == AlterTableRequestPB::DROP_COLUMN) {
        alter_table_has_add_or_drop_column_step = true;
        break;
      }
    }
  }

  TransactionId txn_id = TransactionId::Nil();
  if (is_ysql_table_with_transaction_metadata && alter_table_has_add_or_drop_column_step) {
    {
      LOG(INFO) << "Persist transaction metadata into SysTableEntryPB for table ID " << table->id();
      TRACE("Locking table");
      auto l = table->LockForWrite();
      auto& tablet_data = *l.mutable_data();
      auto& table_pb = tablet_data.pb;
      table_pb.mutable_transaction()->CopyFrom(req->transaction());

      // Update sys-catalog with the transaction ID.
      TRACE("Updating table metadata on disk");
      RETURN_NOT_OK(master_->catalog_manager()->sys_catalog_->Upsert(
          master_->catalog_manager()->leader_ready_term(), table.get()));

      // Update the in-memory state.
TRACE("Committing in-memory state"); l.Commit(); } txn_id = VERIFY_RESULT(FullyDecodeTransactionId(req->transaction().transaction_id())); } for (const scoped_refptr<TabletInfo>& tablet : tablets) { auto call = std::make_shared<AsyncAlterTable>(master_, AsyncTaskPool(), tablet, table, txn_id); tablet->table()->AddTask(call); if (PREDICT_FALSE(FLAGS_TEST_slowdown_alter_table_rpcs_ms > 0)) { LOG(INFO) << "Sleeping for " << tablet->id() << FLAGS_TEST_slowdown_alter_table_rpcs_ms << "ms before sending async alter table request"; SleepFor(MonoDelta::FromMilliseconds(FLAGS_TEST_slowdown_alter_table_rpcs_ms)); } RETURN_NOT_OK(ScheduleTask(call)); } return Status::OK(); } void CatalogManager::SendCopartitionTabletRequest(const scoped_refptr<TabletInfo>& tablet, const scoped_refptr<TableInfo>& table) { auto call = std::make_shared<AsyncCopartitionTable>(master_, AsyncTaskPool(), tablet, table); table->AddTask(call); WARN_NOT_OK(ScheduleTask(call), "Failed to send copartition table request"); } Status CatalogManager::SendSplitTabletRequest( const scoped_refptr<TabletInfo>& tablet, std::array<TabletId, kNumSplitParts> new_tablet_ids, const std::string& split_encoded_key, const std::string& split_partition_key) { VLOG(2) << "Scheduling SplitTablet request to leader tserver for source tablet ID: " << tablet->tablet_id() << ", after-split tablet IDs: " << AsString(new_tablet_ids); auto call = std::make_shared<AsyncSplitTablet>( master_, AsyncTaskPool(), tablet, new_tablet_ids, split_encoded_key, split_partition_key, [this, tablet](const Status& status) { if (!status.ok()) { LOG(WARNING) << "AsyncSplitTablet task failed with status: " << status; tablet_split_manager_.RemoveFailedProcessingTabletSplit(tablet->tablet_id()); } }); tablet->table()->AddTask(call); return ScheduleTask(call); } void CatalogManager::DeleteTabletReplicas( TabletInfo* tablet, const std::string& msg, bool hide_only) { auto locations = tablet->GetReplicaLocations(); LOG(INFO) << "Sending DeleteTablet for " << 
locations->size() << " replicas of tablet " << tablet->tablet_id();
  for (const TabletInfo::ReplicaMap::value_type& r : *locations) {
    SendDeleteTabletRequest(tablet->tablet_id(), TABLET_DATA_DELETED, boost::none, tablet->table(),
                            r.second.ts_desc, msg, hide_only);
  }
}

// Guards against deleting tablets that must never be deleted: the system catalog tablet and
// tablets backing colocated user tables.
Status CatalogManager::CheckIfForbiddenToDeleteTabletOf(const scoped_refptr<TableInfo>& table) {
  // Do not delete the system catalog tablet.
  if (IsSystemTable(*table)) {
    return STATUS(InvalidArgument, "It is not allowed to delete system tables");
  }
  // Do not delete the tablet of a colocated table.
  if (IsColocatedUserTable(*table)) {
    return STATUS(InvalidArgument, "It is not allowed to delete tablets of the colocated tables.");
  }
  return Status::OK();
}

// Marks all of the table's tablets as deleted (or hidden, when retained by snapshot schedules)
// and fans out the corresponding requests to the replicas.
Status CatalogManager::DeleteTabletsAndSendRequests(
    const TableInfoPtr& table, const RepeatedBytes& retained_by_snapshot_schedules) {
  // Silently fail if tablet deletion is forbidden so table deletion can continue executing.
  if (!CheckIfForbiddenToDeleteTabletOf(table).ok()) {
    return Status::OK();
  }

  vector<scoped_refptr<TabletInfo>> tablets;
  table->GetAllTablets(&tablets, IncludeSplitTablets::kTrue);

  // Sort by tablet id so the downstream lock acquisition happens in a deterministic order.
  std::sort(tablets.begin(), tablets.end(), [](const auto& lhs, const auto& rhs) {
    return lhs->tablet_id() < rhs->tablet_id();
  });

  string deletion_msg = "Table deleted at " + LocalTimeAsString();
  RETURN_NOT_OK(DeleteTabletListAndSendRequests(
      tablets, deletion_msg, retained_by_snapshot_schedules));

  if (IsColocatedParentTable(*table)) {
    SharedLock lock(mutex_);
    colocated_tablet_ids_map_.erase(table->namespace_id());
  } else if (IsTablegroupParentTable(*table)) {
    // In the case of dropped database/tablegroup parent table, need to delete tablegroup info.
SharedLock lock(mutex_);
    for (auto tgroup : tablegroup_tablet_ids_map_[table->namespace_id()]) {
      tablegroup_ids_map_.erase(tgroup.first);
    }
    tablegroup_tablet_ids_map_.erase(table->namespace_id());
  }
  return Status::OK();
}

// Deletes (or hides, when 'retained_by_snapshot_schedules' is non-empty) the given tablets:
// takes write locks on all of them, updates their persistent state in one sys catalog batch,
// then sends delete requests to the replicas.
Status CatalogManager::DeleteTabletListAndSendRequests(
    const std::vector<scoped_refptr<TabletInfo>>& tablets, const std::string& deletion_msg,
    const google::protobuf::RepeatedPtrField<std::string>& retained_by_snapshot_schedules) {
  // Pairs each tablet with its held write lock so both can be iterated together below.
  struct TabletAndLock {
    TabletInfoPtr tablet;
    TabletInfo::WriteLock lock;
  };
  std::vector<TabletAndLock> tablets_and_locks;
  tablets_and_locks.reserve(tablets.size());
  std::vector<TabletInfo*> tablet_infos;
  tablet_infos.reserve(tablets_and_locks.size());
  std::vector<TabletInfoPtr> marked_as_hidden;

  // Grab tablets and tablet write locks. The list should already be in tablet_id sorted order.
  for (const auto& tablet : tablets) {
    tablets_and_locks.push_back(TabletAndLock {
      .tablet = tablet,
      .lock = tablet->LockForWrite(),
    });
    tablet_infos.emplace_back(tablet.get());
  }

  HideOnly hide_only(!retained_by_snapshot_schedules.empty());
  // Hidden tablets record the hybrid time at which they were hidden.
  HybridTime hide_hybrid_time = hide_only ? master_->clock()->Now() : HybridTime();

  // Mark the tablets as deleted.
  for (auto& tablet_and_lock : tablets_and_locks) {
    auto& tablet = tablet_and_lock.tablet;
    auto& tablet_lock = tablet_and_lock.lock;

    bool was_hidden = tablet_lock->ListedAsHidden();
    if (hide_only) {
      LOG(INFO) << "Hiding tablet " << tablet->tablet_id() << " ...";
      tablet_lock.mutable_data()->pb.set_hide_hybrid_time(hide_hybrid_time.ToUint64());
      *tablet_lock.mutable_data()->pb.mutable_retained_by_snapshot_schedules() =
          retained_by_snapshot_schedules;
    } else {
      LOG(INFO) << "Deleting tablet " << tablet->tablet_id() << " ...";
      tablet_lock.mutable_data()->set_state(SysTabletsEntryPB::DELETED, deletion_msg);
      tablet->table()->RemoveSplitTablet(tablet->id());
    }
    // Track tablets that transitioned into the hidden state during this pass.
    if (tablet_lock->ListedAsHidden() && !was_hidden) {
      marked_as_hidden.push_back(tablet);
    }
  }

  // Update all the tablet states in raft in bulk.
RETURN_NOT_OK(sys_catalog_->Upsert(leader_ready_term(), tablet_infos));

  // Commit the change.
  for (auto& tablet_and_lock : tablets_and_locks) {
    auto& tablet = tablet_and_lock.tablet;
    auto& tablet_lock = tablet_and_lock.lock;

    tablet_lock.Commit();
    LOG(INFO) << (hide_only ? "Hid tablet " : "Deleted tablet ") << tablet->tablet_id();

    DeleteTabletReplicas(tablet.get(), deletion_msg, hide_only);
  }

  if (!marked_as_hidden.empty()) {
    LockGuard lock(mutex_);
    hidden_tablets_.insert(hidden_tablets_.end(), marked_as_hidden.begin(),
                           marked_as_hidden.end());
  }

  return Status::OK();
}

// Schedules an async delete (or hide, when 'hide_only' is set) of one tablet replica on the
// tablet server described by 'ts_desc'. Scheduling failures are logged, not returned.
void CatalogManager::SendDeleteTabletRequest(
    const TabletId& tablet_id,
    TabletDataState delete_type,
    const boost::optional<int64_t>& cas_config_opid_index_less_or_equal,
    const scoped_refptr<TableInfo>& table,
    TSDescriptor* ts_desc,
    const string& reason,
    bool hide_only) {
  // Test-only escape hatch to suppress all tablet deletions.
  if (PREDICT_FALSE(GetAtomicFlag(&FLAGS_TEST_disable_tablet_deletion))) {
    return;
  }
  LOG_WITH_PREFIX(INFO) << (hide_only ? "Hiding" : "Deleting") << " tablet " << tablet_id
                        << " on peer " << ts_desc->permanent_uuid()
                        << " with delete type " << TabletDataState_Name(delete_type)
                        << " (" << reason << ")";
  auto call = std::make_shared<AsyncDeleteReplica>(master_, AsyncTaskPool(),
      ts_desc->permanent_uuid(), table, tablet_id, delete_type,
      cas_config_opid_index_less_or_equal, reason);
  if (hide_only) {
    call->set_hide_only(hide_only);
  }
  if (table != nullptr) {
    table->AddTask(call);
  }

  auto status = ScheduleTask(call);
  WARN_NOT_OK(status, Substitute("Failed to send delete request for tablet $0", tablet_id));
  // TODO(bogdan): does the pending delete semantics need to change?
if (status.ok()) {
    // Track the delete so the tablet server is not double-targeted while it is in flight.
    ts_desc->AddPendingTabletDelete(tablet_id);
  }
}

// Schedules an async leader step-down for 'tablet', optionally removing the stepped-down peer
// ('should_remove') and/or suggesting a specific new leader uuid.
void CatalogManager::SendLeaderStepDownRequest(
    const scoped_refptr<TabletInfo>& tablet, const ConsensusStatePB& cstate,
    const string& change_config_ts_uuid, bool should_remove,
    const string& new_leader_uuid) {
  auto task = std::make_shared<AsyncTryStepDown>(
      master_, AsyncTaskPool(), tablet, cstate, change_config_ts_uuid, should_remove,
      new_leader_uuid);
  tablet->table()->AddTask(task);
  Status status = ScheduleTask(task);
  WARN_NOT_OK(status, Substitute("Failed to send new $0 request", task->type_name()));
}

// TODO: refactor this into a joint method with the add one.
void CatalogManager::SendRemoveServerRequest(
    const scoped_refptr<TabletInfo>& tablet, const ConsensusStatePB& cstate,
    const string& change_config_ts_uuid) {
  // Check if the user wants the leader to be stepped down.
  auto task = std::make_shared<AsyncRemoveServerTask>(
      master_, AsyncTaskPool(), tablet, cstate, change_config_ts_uuid);
  tablet->table()->AddTask(task);
  WARN_NOT_OK(ScheduleTask(task), Substitute("Failed to send new $0 request", task->type_name()));
}

// Schedules an async ChangeConfig ADD_SERVER of 'change_config_ts_uuid' (with the given member
// type) to the Raft config of 'tablet'.
void CatalogManager::SendAddServerRequest(
    const scoped_refptr<TabletInfo>& tablet, RaftPeerPB::MemberType member_type,
    const ConsensusStatePB& cstate, const string& change_config_ts_uuid) {
  auto task = std::make_shared<AsyncAddServerTask>(master_, AsyncTaskPool(), tablet, member_type,
      cstate, change_config_ts_uuid);
  tablet->table()->AddTask(task);
  WARN_NOT_OK(
      ScheduleTask(task),
      Substitute("Failed to send AddServer of tserver $0 to tablet $1",
                 change_config_ts_uuid, tablet.get()->ToString()));
}

// Collects the pending add-server / remove-server / step-down tasks for 'table_uuid', keyed by
// tablet id. Caller must hold the appropriate lock (Unlocked suffix).
void CatalogManager::GetPendingServerTasksUnlocked(
    const TableId &table_uuid,
    TabletToTabletServerMap *add_replica_tasks_map,
    TabletToTabletServerMap *remove_replica_tasks_map,
    TabletToTabletServerMap *stepdown_leader_tasks_map) {
  auto table = GetTableInfoUnlocked(table_uuid);
  for (const auto& task : table->GetTasks()) {
    TabletToTabletServerMap* outputMap = nullptr;
    if (task->type() ==
MonitoredTask::ASYNC_ADD_SERVER) {
      outputMap = add_replica_tasks_map;
    } else if (task->type() == MonitoredTask::ASYNC_REMOVE_SERVER) {
      outputMap = remove_replica_tasks_map;
    } else if (task->type() == MonitoredTask::ASYNC_TRY_STEP_DOWN) {
      // Store new_leader_uuid instead of change_config_ts_uuid.
      auto raft_task = static_cast<AsyncTryStepDown*>(task.get());
      (*stepdown_leader_tasks_map)[raft_task->tablet_id()] = raft_task->new_leader_uuid();
      continue;
    }
    if (outputMap) {
      auto raft_task = static_cast<CommonInfoForRaftTask*>(task.get());
      (*outputMap)[raft_task->tablet_id()] = raft_task->change_config_ts_uuid();
    }
  }
}

// Scans the whole tablet map and buckets tablets into those whose deletion should be processed
// and those still awaiting assignment, grouped by table.
void CatalogManager::ExtractTabletsToProcess(
    TabletInfos *tablets_to_delete,
    TableToTabletInfos *tablets_to_process) {
  SharedLock lock(mutex_);

  // TODO: At the moment we loop through all the tablets
  //       we can keep a set of tablets waiting for "assignment"
  //       or just a counter to avoid to take the lock and loop through the tablets
  //       if everything is "stable".

  for (const TabletInfoMap::value_type& entry : *tablet_map_) {
    scoped_refptr<TabletInfo> tablet = entry.second;
    auto table = tablet->table();
    if (!table) {
      // Tablet is orphaned or in preparing state, continue.
      continue;
    }

    // acquire table lock before tablets.
    auto table_lock = table->LockForRead();
    auto tablet_lock = tablet->LockForRead();

    // If the table is deleted or the tablet was replaced at table creation time.
    if (tablet_lock->is_deleted() || table_lock->started_deleting()) {
      // Process this table deletion only once (tombstones for table may remain longer).
      if (table_ids_map_->find(tablet->table()->id()) != table_ids_map_->end()) {
        tablets_to_delete->push_back(tablet);
      }
      // Don't process deleted tables regardless.
      continue;
    }

    // Running tablets.
    if (tablet_lock->is_running()) {
      // TODO: handle last update > not responding timeout?
      continue;
    }

    // Tablets not yet assigned or with a report just received.
(*tablets_to_process)[tablet->table()->id()].push_back(tablet);
  }
}

// Returns true if any table in the catalog is currently in the DELETING state.
bool CatalogManager::AreTablesDeleting() {
  SharedLock lock(mutex_);

  for (const TableInfoMap::value_type& entry : *table_ids_map_) {
    scoped_refptr<TableInfo> table(entry.second);
    auto table_lock = table->LockForRead();
    // TODO(jason): possibly change this to started_deleting when we begin removing DELETED tables
    // from table_ids_map_ (see CleanUpDeletedTables).
    if (table_lock->is_deleting()) {
      return true;
    }
  }
  return false;
}

// Accumulates the work to perform after examining each tablet's assignment state.
struct DeferredAssignmentActions {
  // Tablets whose in-memory (dirty) state changed and must be persisted.
  std::vector<TabletInfo*> modified_tablets;
  // Tablets for which a CreateTablet RPC still needs to be sent.
  std::vector<TabletInfo*> needs_create_rpc;
};

// Moves a freshly created (PREPARING) tablet to CREATING and queues its creation RPC.
void CatalogManager::HandleAssignPreparingTablet(TabletInfo* tablet,
                                                 DeferredAssignmentActions* deferred) {
  // The tablet was just created (probably by a CreateTable RPC).
  // Update the state to "creating" to be ready for the creation request.
  tablet->mutable_metadata()->mutable_dirty()->set_state(
      SysTabletsEntryPB::CREATING, "Sending initial creation of tablet");
  deferred->modified_tablets.push_back(tablet);
  deferred->needs_create_rpc.push_back(tablet);
  VLOG(1) << "Assign new tablet " << tablet->ToString();
}

// Re-examines a tablet stuck in CREATING. If the creation timeout has expired, a brand-new
// replacement tablet is created for the same partition, the old one is marked REPLACED, and the
// replacement is queued for creation; the replacement is also appended to 'new_tablets'.
void CatalogManager::HandleAssignCreatingTablet(TabletInfo* tablet,
                                                DeferredAssignmentActions* deferred,
                                                vector<scoped_refptr<TabletInfo>>* new_tablets) {
  MonoDelta time_since_updated =
      MonoTime::Now().GetDeltaSince(tablet->last_update_time());
  int64_t remaining_timeout_ms =
      FLAGS_tablet_creation_timeout_ms - time_since_updated.ToMilliseconds();

  if (tablet->LockForRead()->pb.has_split_parent_tablet_id()) {
    // No need to recreate post-split tablets, since this is always done on source tablet replicas.
    VLOG(2) << "Post-split tablet " << AsString(tablet) << " still being created.";
    return;
  }

  // Skip the tablet if the assignment timeout is not yet expired.
  if (remaining_timeout_ms > 0) {
    VLOG(2) << "Tablet " << tablet->ToString() << " still being created. "
            << remaining_timeout_ms << "ms remain until timeout.";
    return;
  }

  const PersistentTabletInfo& old_info = tablet->metadata().state();

  // The "tablet creation" was already sent, but we didn't receive an answer
  // within the timeout. So the tablet will be replaced by a new one.
  TabletInfo *replacement;
  {
    LockGuard lock(mutex_);
    replacement = CreateTabletInfo(tablet->table().get(), old_info.pb.partition());
  }
  LOG(WARNING) << "Tablet " << tablet->ToString() << " was not created within "
               << "the allowed timeout. Replacing with a new tablet "
               << replacement->tablet_id();

  tablet->table()->AddTablet(replacement);
  {
    LockGuard lock(mutex_);
    auto tablet_map_checkout = tablet_map_.CheckOut();
    (*tablet_map_checkout)[replacement->tablet_id()] = replacement;
  }

  // Mark old tablet as replaced.
  tablet->mutable_metadata()->mutable_dirty()->set_state(
      SysTabletsEntryPB::REPLACED,
      Substitute("Replaced by $0 at $1", replacement->tablet_id(), LocalTimeAsString()));

  // Mark new tablet as being created.
  replacement->mutable_metadata()->mutable_dirty()->set_state(
      SysTabletsEntryPB::CREATING,
      Substitute("Replacement for $0", tablet->tablet_id()));

  deferred->modified_tablets.push_back(tablet);
  deferred->modified_tablets.push_back(replacement);
  deferred->needs_create_rpc.push_back(replacement);
  VLOG(1) << "Replaced tablet " << tablet->tablet_id()
          << " with " << replacement->tablet_id()
          << " (table " << tablet->table()->ToString() << ")";

  new_tablets->push_back(replacement);
}

// TODO: we could batch the IO onto a background thread.
Status CatalogManager::HandleTabletSchemaVersionReport(
    TabletInfo *tablet, uint32_t version, const scoped_refptr<TableInfo>& table_info) {
  scoped_refptr<TableInfo> table;
  if (table_info) {
    table = table_info;
  } else {
    table = tablet->table();
  }

  // Update the schema version if it's the latest.
tablet->set_reported_schema_version(table->id(), version);
  VLOG_WITH_PREFIX_AND_FUNC(1)
      << "Tablet " << tablet->tablet_id() << " reported version " << version;

  // Verify if it's the last tablet report, and the alter completed.
  {
    auto l = table->LockForRead();
    if (l->pb.state() != SysTablesEntryPB::ALTERING) {
      VLOG_WITH_PREFIX_AND_FUNC(2) << "Table " << table->ToString() << " is not altering";
      return Status::OK();
    }

    uint32_t current_version = l->pb.version();
    if (table->IsAlterInProgress(current_version)) {
      VLOG_WITH_PREFIX_AND_FUNC(2) << "Table " << table->ToString() << " has IsAlterInProgress ("
                                   << current_version << ")";
      return Status::OK();
    }
  }

  return MultiStageAlterTable::LaunchNextTableInfoVersionIfNecessary(this, table, version);
}

// Helper class to commit TabletInfo mutations at the end of a scope.
namespace {

class ScopedTabletInfoCommitter {
 public:
  explicit ScopedTabletInfoCommitter(const TabletInfos* tablets)
    : tablets_(DCHECK_NOTNULL(tablets)),
      aborted_(false) {
  }

  // This method is not thread safe. Must be called by the same thread
  // that would destroy this instance.
  void Abort() {
    for (const scoped_refptr<TabletInfo>& tablet : *tablets_) {
      tablet->mutable_metadata()->AbortMutation();
    }
    // After Abort(), the destructor's Commit() becomes a no-op.
    aborted_ = true;
  }

  void Commit() {
    if (PREDICT_TRUE(!aborted_)) {
      for (const scoped_refptr<TabletInfo>& tablet : *tablets_) {
        tablet->mutable_metadata()->CommitMutation();
      }
    }
  }

  // Commit the transactions.
  ~ScopedTabletInfoCommitter() {
    Commit();
  }

 private:
  const TabletInfos* tablets_;  // Not owned; must outlive this committer.
  bool aborted_;
};
}  // anonymous namespace

// Drives tablet assignment for the given tablets: moves PREPARING tablets to CREATING, replaces
// timed-out CREATING tablets, persists all state changes in one sys catalog batch, and sends the
// resulting delete/create requests.
Status CatalogManager::ProcessPendingAssignments(const TabletInfos& tablets) {
  VLOG(1) << "Processing pending assignments";

  // Take write locks on all tablets to be processed, and ensure that they are
  // unlocked at the end of this scope.
for (const scoped_refptr<TabletInfo>& tablet : tablets) {
    tablet->mutable_metadata()->StartMutation();
  }
  ScopedTabletInfoCommitter unlocker_in(&tablets);

  // Any tablets created by the helper functions will also be created in a
  // locked state, so we must ensure they are unlocked before we return to
  // avoid deadlocks.
  TabletInfos new_tablets;
  ScopedTabletInfoCommitter unlocker_out(&new_tablets);

  DeferredAssignmentActions deferred;

  // Iterate over each of the tablets and handle it, whatever state
  // it may be in. The actions required for the tablet are collected
  // into 'deferred'.
  for (const scoped_refptr<TabletInfo>& tablet : tablets) {
    SysTabletsEntryPB::State t_state = tablet->metadata().state().pb.state();

    switch (t_state) {
      case SysTabletsEntryPB::PREPARING:
        HandleAssignPreparingTablet(tablet.get(), &deferred);
        break;

      case SysTabletsEntryPB::CREATING:
        HandleAssignCreatingTablet(tablet.get(), &deferred, &new_tablets);
        break;

      default:
        VLOG(2) << "Nothing to do for tablet " << tablet->tablet_id() << ": state = "
                << SysTabletsEntryPB_State_Name(t_state);
        break;
    }
  }

  // Nothing to do.
  if (deferred.modified_tablets.empty() &&
      deferred.needs_create_rpc.empty()) {
    return Status::OK();
  }

  // For those tablets which need to be created in this round, assign replicas.
  TSDescriptorVector ts_descs;
  {
    // Exclude blacklisted tablet servers from replica placement.
    BlacklistSet blacklist = BlacklistSetFromPB();
    master_->ts_manager()->GetAllLiveDescriptors(&ts_descs, blacklist);
  }
  Status s;
  unordered_set<TableInfo*> ok_status_tables;
  for (TabletInfo *tablet : deferred.needs_create_rpc) {
    // NOTE: if we fail to select replicas on the first pass (due to
    // insufficient Tablet Servers being online), we will still try
    // again unless the tablet/table creation is cancelled.
s = SelectReplicasForTablet(ts_descs, tablet);
    if (!s.ok()) {
      s = s.CloneAndPrepend(Substitute(
          "An error occurred while selecting replicas for tablet $0: $1",
          tablet->tablet_id(), s.ToString()));
      // Surface the error on the owning table so CreateTable status reflects it.
      tablet->table()->SetCreateTableErrorStatus(s);
      break;
    } else {
      ok_status_tables.emplace(tablet->table().get());
    }
  }

  // Update the sys catalog with the new set of tablets/metadata.
  if (s.ok()) {
    // If any of the ok_status_tables had an error in the previous iterations, we
    // need to clear up the error status to reflect that all the create tablets have now
    // succeded.
    for (TableInfo* table : ok_status_tables) {
      table->SetCreateTableErrorStatus(Status::OK());
    }

    s = sys_catalog_->Upsert(leader_ready_term(), deferred.modified_tablets);
    if (!s.ok()) {
      s = s.CloneAndPrepend("An error occurred while persisting the updated tablet metadata");
    }
  }

  if (!s.ok()) {
    LOG(WARNING) << "Aborting the current task due to error: " << s.ToString();
    // If there was an error, abort any mutations started by the current task.
    // NOTE: Lock order should be lock_ -> table -> tablet.
    // We currently have a bunch of tablets locked and need to unlock first to ensure this holds.
// Error rollback path: snapshot the identifying info of every newly created
// tablet BEFORE releasing the tablet locks, then remove them from their tables
// and from the global tablet map under the correct lock ordering.
    struct TabletToRemove {
      TableInfoPtr table;
      TabletId tablet_id;
      std::string partition_key_start;
    };
    std::vector<TabletToRemove> tablets_to_remove;
    for (const auto& new_tablet : new_tablets) {
      tablets_to_remove.push_back(TabletToRemove {
        .table = new_tablet->table(),
        .tablet_id = new_tablet->tablet_id(),
        .partition_key_start = new_tablet->metadata().dirty().pb.partition().partition_key_start(),
      });
    }
    unlocker_out.Abort();  // tablet.unlock
    unlocker_in.Abort();
    // Group removals by table so the table read lock below is taken at most
    // once per table.
    std::sort(
        tablets_to_remove.begin(), tablets_to_remove.end(),
        [](const auto& lhs, const auto& rhs) {
      return lhs.table->id() < rhs.table->id();
    });
    {
      TableInfo::ReadLock lock;
      TableInfoPtr current_table;
      for (auto& tablet_to_remove : tablets_to_remove) {
        if (current_table != tablet_to_remove.table) {
          current_table = tablet_to_remove.table;
          lock.Unlock();
        }
        if (current_table->RemoveTablet(tablet_to_remove.partition_key_start)) {
          if (VLOG_IS_ON(1)) {
            // Table lock is only needed to read the table name for logging.
            if (!lock.locked()) {
              lock = current_table->LockForRead();
            }
            LOG(INFO) << "Removed tablet " << tablet_to_remove.tablet_id << " from table "
                      << lock->name();
          }
        }
      }
    }
    {
      LockGuard lock(mutex_); // lock_.lock
      auto tablet_map_checkout = tablet_map_.CheckOut();
      for (auto& tablet_to_remove : tablets_to_remove) {
        // Potential race condition above, but it's okay if a background thread deleted this.
        tablet_map_checkout->erase(tablet_to_remove.tablet_id);
      }
    }
    return s;
  }
  // Send DeleteTablet requests to tablet servers serving deleted tablets.
  // This is asynchronous / non-blocking.
  for (auto* tablet : deferred.modified_tablets) {
    if (tablet->metadata().dirty().is_deleted()) {
      // Actual delete, because we delete tablet replica.
      DeleteTabletReplicas(tablet, tablet->metadata().dirty().pb.state_msg(), HideOnly::kFalse);
    }
  }
  // Send the CreateTablet() requests to the servers. This is asynchronous / non-blocking.
  return SendCreateTabletRequests(deferred.needs_create_rpc);
}

// Chooses the initial Raft configuration for 'tablet' and writes it into the
// tablet's dirty metadata (committed consensus state). Does not persist.
Status CatalogManager::SelectReplicasForTablet(const TSDescriptorVector& ts_descs,
                                               TabletInfo* tablet) {
  auto table_guard = tablet->table()->LockForRead();
  if (!table_guard->pb.IsInitialized()) {
    return STATUS_SUBSTITUTE(InvalidArgument,
        "TableInfo for tablet $0 is not initialized (aborted CreateTable attempt?)",
        tablet->tablet_id());
  }
  const auto& replication_info = VERIFY_RESULT(GetTableReplicationInfo(
      table_guard->pb.replication_info(), tablet->table()->TablespaceIdForTableCreation()));
  // Select the set of replicas for the tablet.
  ConsensusStatePB* cstate = tablet->mutable_metadata()->mutable_dirty()
          ->pb.mutable_committed_consensus_state();
  VLOG_WITH_FUNC(3) << "Committed consensus state: " << AsString(cstate);
  cstate->set_current_term(kMinimumTerm);
  consensus::RaftConfigPB *config = cstate->mutable_config();
  config->set_opid_index(consensus::kInvalidOpIdIndex);
  Status s = HandlePlacementUsingReplicationInfo(replication_info, ts_descs, config);
  if (!s.ok()) {
    return s;
  }
  std::ostringstream out;
  out << "Initial tserver uuids for tablet " << tablet->tablet_id() << ": ";
  for (const RaftPeerPB& peer : config->peers()) {
    out << peer.permanent_uuid() << " ";
  }
  // NOTE(review): the result of out.str() is discarded here, so the message
  // built above is never actually logged. This looks like it was meant to be
  // something like VLOG(0) << out.str(); — confirm intent before changing.
  if (VLOG_IS_ON(0)) {
    out.str();
  }
  VLOG_WITH_FUNC(3) << "Committed consensus state has been updated to: " << AsString(cstate);
  return Status::OK();
}

// Thin adapter: applies the live-replica placement policy with VOTER members.
Status CatalogManager::HandlePlacementUsingReplicationInfo(
    const ReplicationInfoPB& replication_info,
    const TSDescriptorVector& all_ts_descs,
    consensus::RaftConfigPB* config) {
  return HandlePlacementUsingPlacementInfo(replication_info.live_replicas(),
                                           all_ts_descs, RaftPeerPB::VOTER, config);
}

// Assigns 'nreplicas' peers of 'member_type' into 'config', honoring the
// per-placement-block minimums in 'placement_info' when present.
Status CatalogManager::HandlePlacementUsingPlacementInfo(const PlacementInfoPB& placement_info,
                                                         const TSDescriptorVector& ts_descs,
                                                         RaftPeerPB::MemberType member_type,
                                                         consensus::RaftConfigPB* config) {
  int nreplicas = GetNumReplicasFromPlacementInfo(placement_info);
  // NOTE(review): signed/unsigned comparison (size_t vs int) below.
  if (ts_descs.size() < nreplicas) {
    return
        STATUS_SUBSTITUTE(InvalidArgument,
        "Not enough tablet servers in the requested placements. Need at least $0, have $1",
        nreplicas, ts_descs.size());
  }
  // Keep track of servers we've already selected, so that we don't attempt to
  // put two replicas on the same host.
  set<shared_ptr<TSDescriptor>> already_selected_ts;
  if (placement_info.placement_blocks().empty()) {
    // If we don't have placement info, just place the replicas as before, distributed across the
    // whole cluster.
    SelectReplicas(ts_descs, nreplicas, config, &already_selected_ts, member_type);
  } else {
    // TODO(bogdan): move to separate function
    //
    // If we do have placement info, we'll try to use the same power of two algorithm, but also
    // match the requested policies. We'll assign the minimum requested replicas in each combination
    // of cloud.region.zone and then if we still have leftover replicas, we'll assign those
    // in any of the allowed areas.
    auto all_allowed_ts = VERIFY_RESULT(FindTServersForPlacementInfo(placement_info, ts_descs));
    // Loop through placements and assign to respective available TSs.
    for (const auto& pb : placement_info.placement_blocks()) {
      auto available_ts_descs = VERIFY_RESULT(FindTServersForPlacementBlock(pb, ts_descs));
      int num_replicas = pb.min_num_replicas();
      SelectReplicas(available_ts_descs, num_replicas, config, &already_selected_ts, member_type);
    }
    // Any replicas beyond the per-block minimums go anywhere in the allowed set.
    int replicas_left = nreplicas - already_selected_ts.size();
    DCHECK_GE(replicas_left, 0);
    if (replicas_left > 0) {
      // No need to do an extra check here, as we checked early if we have enough to cover all
      // requested placements and checked individually per placement info, if we could cover the
      // minimums.
      SelectReplicas(all_allowed_ts, replicas_left, config, &already_selected_ts, member_type);
    }
  }
  return Status::OK();
}

// Returns the tablet servers matching ANY placement block of 'placement_info',
// or InvalidArgument if fewer than num_replicas() such servers exist.
Result<vector<shared_ptr<TSDescriptor>>> CatalogManager::FindTServersForPlacementInfo(
    const PlacementInfoPB& placement_info,
    const TSDescriptorVector& ts_descs) {
  vector<shared_ptr<TSDescriptor>> all_allowed_ts;
  for (const auto& ts : ts_descs) {
    for (const auto& pb : placement_info.placement_blocks()) {
      if (ts->MatchesCloudInfo(pb.cloud_info())) {
        all_allowed_ts.push_back(ts);
        break;
      }
    }
  }
  // Fail if we don't have enough tablet servers in the areas requested.
  // NOTE(review): signed/unsigned comparison (size_t vs int) below.
  const int nreplicas = placement_info.num_replicas();
  if (all_allowed_ts.size() < nreplicas) {
    return STATUS_SUBSTITUTE(InvalidArgument,
        "Not enough tablet servers in the requested placements. Need at least $0, have $1",
        nreplicas, all_allowed_ts.size());
  }
  return all_allowed_ts;
}

// Returns the tablet servers matching a single placement block's cloud info,
// or InvalidArgument if fewer than min_num_replicas() such servers exist.
Result<vector<shared_ptr<TSDescriptor>>> CatalogManager::FindTServersForPlacementBlock(
    const PlacementBlockPB& placement_block,
    const TSDescriptorVector& ts_descs) {
  vector<shared_ptr<TSDescriptor>> allowed_ts;
  const auto& cloud_info = placement_block.cloud_info();
  for (const auto& ts : ts_descs) {
    if (ts->MatchesCloudInfo(cloud_info)) {
      allowed_ts.push_back(ts);
    }
  }
  // Verify that there are sufficient TServers to satisfy min_num_replicas.
  int num_replicas = placement_block.min_num_replicas();
  if (allowed_ts.size() < num_replicas) {
    return STATUS_SUBSTITUTE(InvalidArgument, "Not enough tablet servers in $0. Need at least $1 but only have $2.",
        TSDescriptor::generate_placement_id(cloud_info), num_replicas, allowed_ts.size());
  }
  return allowed_ts;
}

// Fires one AsyncCreateReplica task per (tablet, peer). Asynchronous /
// non-blocking: scheduling failures are only warned about.
Status CatalogManager::SendCreateTabletRequests(const vector<TabletInfo*>& tablets) {
  auto schedules_to_tablets_map = VERIFY_RESULT(MakeSnapshotSchedulesToObjectIdsMap(
      SysRowEntry::TABLET));
  for (TabletInfo *tablet : tablets) {
    const consensus::RaftConfigPB& config =
        tablet->metadata().dirty().pb.committed_consensus_state().config();
    tablet->set_last_update_time(MonoTime::Now());
    // Collect the snapshot schedules covering this tablet (lists are sorted,
    // hence binary_search).
    std::vector<SnapshotScheduleId> schedules;
    for (const auto& pair : schedules_to_tablets_map) {
      if (std::binary_search(pair.second.begin(), pair.second.end(), tablet->id())) {
        schedules.push_back(pair.first);
      }
    }
    for (const RaftPeerPB& peer : config.peers()) {
      auto task = std::make_shared<AsyncCreateReplica>(master_, AsyncTaskPool(),
          peer.permanent_uuid(), tablet, schedules);
      tablet->table()->AddTask(task);
      WARN_NOT_OK(ScheduleTask(task), "Failed to send new tablet request");
    }
  }
  return Status::OK();
}

// If responses have been received from sufficient replicas (including hinted leader),
// pick proposed leader and start election.
void CatalogManager::StartElectionIfReady(
    const consensus::ConsensusStatePB& cstate, TabletInfo* tablet) {
  auto replicas = tablet->GetReplicaLocations();
  // Count voters in the committed config vs. voters currently reported running.
  int num_voters = 0;
  for (const auto& peer : cstate.config().peers()) {
    if (peer.member_type() == RaftPeerPB::VOTER) {
      ++num_voters;
    }
  }
  int majority_size = num_voters / 2 + 1;
  int running_voters = 0;
  for (const auto& replica : *replicas) {
    if (replica.second.member_type == RaftPeerPB::VOTER) {
      ++running_voters;
    }
  }
  VLOG_WITH_PREFIX(4)
      << __func__ << ": T " << tablet->tablet_id() << ": " << AsString(*replicas)
      << ", voters: " << running_voters << "/" << majority_size;
  // An election can only succeed once a majority of voters is up.
  if (running_voters < majority_size) {
    VLOG_WITH_PREFIX(4) << __func__ << ": Not enough voters";
    return;
  }
  ReplicationInfoPB replication_info;
  {
    auto l = cluster_config_->LockForRead();
    replication_info = l->pb.replication_info();
  }
  // Find tservers that can be leaders for a tablet.
  TSDescriptorVector ts_descs;
  {
    BlacklistSet blacklist = BlacklistSetFromPB();
    master_->ts_manager()->GetAllLiveDescriptors(&ts_descs, blacklist);
  }
  // A replica is a leader candidate if its tserver is live, not blacklisted,
  // and accepting leader load under the current replication info.
  std::vector<std::string> possible_leaders;
  for (const auto& replica : *replicas) {
    for (const auto& ts_desc : ts_descs) {
      if (ts_desc->permanent_uuid() == replica.first) {
        if (ts_desc->IsAcceptingLeaderLoad(replication_info)) {
          possible_leaders.push_back(replica.first);
        }
        break;
      }
    }
  }
  // Test-only override: force the lexicographically smallest voter UUID as the
  // sole candidate, waiting until that replica is actually present.
  if (FLAGS_TEST_create_table_leader_hint_min_lexicographic) {
    std::string min_lexicographic;
    for (const auto& peer : cstate.config().peers()) {
      if (peer.member_type() == RaftPeerPB::VOTER) {
        if (min_lexicographic.empty() || peer.permanent_uuid() < min_lexicographic) {
          min_lexicographic = peer.permanent_uuid();
        }
      }
    }
    if (min_lexicographic.empty() || !replicas->count(min_lexicographic)) {
      LOG_WITH_PREFIX(INFO)
          << __func__ << ": Min lexicographic is not yet ready: " << min_lexicographic;
      return;
    }
    possible_leaders = { min_lexicographic };
  }
  if (possible_leaders.empty()) {
    VLOG_WITH_PREFIX(4) << __func__ << ": Cannot pick candidate";
    return;
  }
  // Ensure the election is initiated at most once per tablet.
  if
      (!tablet->InitiateElection()) {
    VLOG_WITH_PREFIX(4) << __func__ << ": Already initiated";
    return;
  }
  // Pick the protege uniformly at random among acceptable candidates and fire
  // the (asynchronous) start-election task.
  const auto& protege = RandomElement(possible_leaders);
  LOG_WITH_PREFIX(INFO)
      << "Starting election at " << tablet->tablet_id() << " in favor of " << protege;
  auto task = std::make_shared<AsyncStartElection>(master_, AsyncTaskPool(), protege, tablet);
  tablet->table()->AddTask(task);
  WARN_NOT_OK(task->Run(), "Failed to send new tablet start election request");
}

// Given exactly two candidate tservers, returns the less-loaded one
// (ties broken randomly in the continuation of this function below).
shared_ptr<TSDescriptor> CatalogManager::PickBetterReplicaLocation(
    const TSDescriptorVector& two_choices) {
  DCHECK_EQ(two_choices.size(), 2);
  const auto& a = two_choices[0];
  const auto& b = two_choices[1];
  // When creating replicas, we consider two aspects of load:
  // (1) how many tablet replicas are already on the server, and
  // (2) how often we've chosen this server recently.
  //
  // The first factor will attempt to put more replicas on servers that
  // are under-loaded (eg because they have newly joined an existing cluster, or have
  // been reformatted and re-joined).
  //
  // The second factor will ensure that we take into account the recent selection
  // decisions even if those replicas are still in the process of being created (and thus
  // not yet reported by the server). This is important because, while creating a table,
  // we batch the selection process before sending any creation commands to the
  // servers themselves.
  //
  // TODO: in the future we may want to factor in other items such as available disk space,
  // actual request load, etc.
  double load_a = a->RecentReplicaCreations() + a->num_live_replicas();
  double load_b = b->RecentReplicaCreations() + b->num_live_replicas();
  if (load_a < load_b) {
    return a;
  } else if (load_b < load_a) {
    return b;
  } else {
    // If the load is the same, we can just pick randomly.
    return two_choices[rng_.Uniform(2)];
  }
}

// Picks one tserver for a new replica, excluding 'excluded', using the
// power-of-two-choices strategy described below.
shared_ptr<TSDescriptor> CatalogManager::SelectReplica(
    const TSDescriptorVector& ts_descs,
    const set<shared_ptr<TSDescriptor>>& excluded) {
  // The replica selection algorithm follows the idea from
  // "Power of Two Choices in Randomized Load Balancing"[1]. For each replica,
  // we randomly select two tablet servers, and then assign the replica to the
  // less-loaded one of the two. This has some nice properties:
  //
  // 1) because the initial selection of two servers is random, we get good
  //    spreading of replicas across the cluster. In contrast if we sorted by
  //    load and always picked under-loaded servers first, we'd end up causing
  //    all tablets of a new table to be placed on an empty server. This wouldn't
  //    give good load balancing of that table.
  //
  // 2) because we pick the less-loaded of two random choices, we do end up with a
  //    weighting towards filling up the underloaded one over time, without
  //    the extreme scenario above.
  //
  // 3) because we don't follow any sequential pattern, every server is equally
  //    likely to replicate its tablets to every other server. In contrast, a
  //    round-robin design would enforce that each server only replicates to its
  //    adjacent nodes in the TS sort order, limiting recovery bandwidth (see
  //    KUDU-1317).
  //
  // [1] http://www.eecs.harvard.edu/~michaelm/postscripts/mythesis.pdf
  // Pick two random servers, excluding those we've already picked.
  // If we've only got one server left, 'two_choices' will actually
  // just contain one element.
  vector<shared_ptr<TSDescriptor>> two_choices;
  rng_.ReservoirSample(ts_descs, 2, excluded, &two_choices);
  if (two_choices.size() == 2) {
    // Pick the better of the two.
    return PickBetterReplicaLocation(two_choices);
  }
  // If we couldn't randomly sample two servers, it's because we only had one
  // more non-excluded choice left.
  CHECK_EQ(1, two_choices.size()) << "ts_descs: " << ts_descs.size()
                                  << " already_sel: " << excluded.size();
  return two_choices[0];
}

// Appends 'nreplicas' distinct peers of 'member_type' to 'config', updating
// 'already_selected_ts' so later calls avoid reusing the same servers.
void CatalogManager::SelectReplicas(
    const TSDescriptorVector& ts_descs, int nreplicas, consensus::RaftConfigPB* config,
    set<shared_ptr<TSDescriptor>>* already_selected_ts, RaftPeerPB::MemberType member_type) {
  DCHECK_LE(nreplicas, ts_descs.size());
  for (int i = 0; i < nreplicas; ++i) {
    // We have to derefence already_selected_ts here, as the inner mechanics uses ReservoirSample,
    // which in turn accepts only a reference to the set, not a pointer. Alternatively, we could
    // have passed it in as a non-const reference, but that goes against our argument passing
    // convention.
    //
    // TODO(bogdan): see if we indeed want to switch back to non-const reference.
    shared_ptr<TSDescriptor> ts = SelectReplica(ts_descs, *already_selected_ts);
    InsertOrDie(already_selected_ts, ts);
    // Increment the number of pending replicas so that we take this selection into
    // account when assigning replicas for other tablets of the same table. This
    // value decays back to 0 over time.
    ts->IncrementRecentReplicaCreations();
    TSRegistrationPB reg = ts->GetRegistration();
    RaftPeerPB *peer = config->add_peers();
    peer->set_permanent_uuid(ts->permanent_uuid());
    // TODO: This is temporary, we will use only UUIDs.
    TakeRegistration(reg.mutable_common(), peer);
    peer->set_member_type(member_type);
  }
}

// Converts the peers of a consensus state into replica entries of 'locs_pb'.
// Fails with IllegalState if any peer is missing its permanent UUID.
Status CatalogManager::ConsensusStateToTabletLocations(const consensus::ConsensusStatePB& cstate,
                                                       TabletLocationsPB* locs_pb) {
  for (const consensus::RaftPeerPB& peer : cstate.config().peers()) {
    TabletLocationsPB_ReplicaPB* replica_pb = locs_pb->add_replicas();
    if (!peer.has_permanent_uuid()) {
      return STATUS_SUBSTITUTE(IllegalState, "Missing UUID $0", peer.ShortDebugString());
    }
    replica_pb->set_role(GetConsensusRole(peer.permanent_uuid(), cstate));
    if (peer.has_member_type()) {
      replica_pb->set_member_type(peer.member_type());
    } else {
      replica_pb->set_member_type(RaftPeerPB::UNKNOWN_MEMBER_TYPE);
    }
    TSInfoPB* tsinfo_pb = replica_pb->mutable_ts_info();
    tsinfo_pb->set_permanent_uuid(peer.permanent_uuid());
    CopyRegistration(peer, tsinfo_pb);
  }
  return Status::OK();
}

// Fills 'locs_pb' with the location info for one tablet: partition, split
// lineage, and replica list (cached replicas, or committed consensus state as
// a fallback). System tablets are served by the master quorum.
Status CatalogManager::BuildLocationsForTablet(const scoped_refptr<TabletInfo>& tablet,
                                               TabletLocationsPB* locs_pb) {
  {
    auto l_tablet = tablet->LockForRead();
    if (l_tablet->is_hidden()) {
      // NOTE(review): the format string has no $0 placeholder for tablet->id();
      // presumably meant "Tablet $0 hidden" — confirm before changing.
      return STATUS_FORMAT(NotFound, "Tablet hidden", tablet->id());
    }
    locs_pb->set_table_id(l_tablet->pb.table_id());
    *locs_pb->mutable_table_ids() = l_tablet->pb.table_ids();
  }
  // For system tables, the set of replicas is always the set of masters.
  if (system_tablets_.find(tablet->id()) != system_tablets_.end()) {
    consensus::ConsensusStatePB master_consensus;
    RETURN_NOT_OK(GetCurrentConfig(&master_consensus));
    locs_pb->set_tablet_id(tablet->tablet_id());
    locs_pb->set_stale(false);
    const auto initial_size = locs_pb->replicas_size();
    RETURN_NOT_OK(ConsensusStateToTabletLocations(master_consensus, locs_pb));
    const auto capabilities = Capabilities();
    // Set capabilities of master node for all newly created system table locations.
    // Only the replicas appended above (from 'initial_size' on) get the master
    // capabilities; pre-existing entries are left untouched.
    for (auto i = locs_pb->mutable_replicas()->begin() + initial_size,
              end = locs_pb->mutable_replicas()->end(); i != end; ++i) {
      *i->mutable_ts_info()->mutable_capabilities() = google::protobuf::RepeatedField<CapabilityId>(
          capabilities.begin(), capabilities.end());
    }
    return Status::OK();
  }
  TSRegistrationPB reg;
  std::shared_ptr<const TabletInfo::ReplicaMap> locs;
  consensus::ConsensusStatePB cstate;
  {
    auto l_tablet = tablet->LockForRead();
    if (PREDICT_FALSE(l_tablet->is_deleted())) {
      // Surface the split children (if any) so clients can follow them.
      std::vector<TabletId> split_tablet_ids;
      for (const auto& split_tablet_id : l_tablet->pb.split_tablet_ids()) {
        split_tablet_ids.push_back(split_tablet_id);
      }
      return STATUS(
          NotFound, "Tablet deleted", l_tablet->pb.state_msg(),
          SplitChildTabletIdsData(split_tablet_ids));
    }
    if (PREDICT_FALSE(!l_tablet->is_running())) {
      return STATUS_FORMAT(ServiceUnavailable, "Tablet $0 not running", tablet->id());
    }
    locs = tablet->GetReplicaLocations();
    // Fall back to the committed consensus state only when nothing is cached.
    if (locs->empty() && l_tablet->pb.has_committed_consensus_state()) {
      cstate = l_tablet->pb.committed_consensus_state();
    }
    const auto& metadata = tablet->metadata().state().pb;
    locs_pb->mutable_partition()->CopyFrom(metadata.partition());
    locs_pb->set_split_depth(metadata.split_depth());
    locs_pb->set_split_parent_tablet_id(metadata.split_parent_tablet_id());
    for (const auto& split_tablet_id : metadata.split_tablet_ids()) {
      *locs_pb->add_split_tablet_ids() = split_tablet_id;
    }
  }
  locs_pb->set_tablet_id(tablet->tablet_id());
  locs_pb->set_stale(locs->empty());
  // If the locations are cached.
  if (!locs->empty()) {
    // NOTE(review): size_t vs int comparison below (locs->size() vs peers_size()).
    if (cstate.IsInitialized() && locs->size() != cstate.config().peers_size()) {
      LOG(WARNING) << "Cached tablet replicas " << locs->size() << " does not match consensus "
                   << cstate.config().peers_size();
    }
    // Copy each cached replica (role, member type, tserver registration and
    // capabilities) into the response.
    for (const TabletInfo::ReplicaMap::value_type& replica : *locs) {
      TabletLocationsPB_ReplicaPB* replica_pb = locs_pb->add_replicas();
      replica_pb->set_role(replica.second.role);
      replica_pb->set_member_type(replica.second.member_type);
      auto tsinfo_pb = replica.second.ts_desc->GetTSInformationPB();
      TSInfoPB* out_ts_info = replica_pb->mutable_ts_info();
      out_ts_info->set_permanent_uuid(tsinfo_pb->tserver_instance().permanent_uuid());
      CopyRegistration(tsinfo_pb->registration().common(), out_ts_info);
      out_ts_info->set_placement_uuid(tsinfo_pb->registration().common().placement_uuid());
      *out_ts_info->mutable_capabilities() = tsinfo_pb->registration().capabilities();
    }
    return Status::OK();
  }
  // If the locations were not cached.
  // TODO: Why would this ever happen? See KUDU-759.
  if (cstate.IsInitialized()) {
    RETURN_NOT_OK(ConsensusStateToTabletLocations(cstate, locs_pb));
  }
  return Status::OK();
}

// Looks up a system tablet by id, or InvalidArgument if unknown.
Result<shared_ptr<tablet::AbstractTablet>> CatalogManager::GetSystemTablet(const TabletId& id) {
  const auto iter = system_tablets_.find(id);
  if (iter == system_tablets_.end()) {
    return STATUS_SUBSTITUTE(InvalidArgument, "$0 is not a valid system tablet id", id);
  }
  return iter->second;
}

// Resolves 'tablet_id' under the catalog lock and builds its locations,
// warning (rate-limited) when the replica count differs from the expected RF.
Status CatalogManager::GetTabletLocations(const TabletId& tablet_id, TabletLocationsPB* locs_pb) {
  scoped_refptr<TabletInfo> tablet_info;
  {
    SharedLock lock(mutex_);
    if (!FindCopy(*tablet_map_, tablet_id, &tablet_info)) {
      return STATUS_SUBSTITUTE(NotFound, "Unknown tablet $0", tablet_id);
    }
  }
  Status s = GetTabletLocations(tablet_info, locs_pb);
  int num_replicas = 0;
  if (GetReplicationFactorForTablet(tablet_info, &num_replicas).ok() && num_replicas > 0 &&
      locs_pb->replicas().size() != num_replicas) {
    YB_LOG_EVERY_N_SECS(WARNING, 1)
        << "Expected replicas " << num_replicas << " but found "
        << locs_pb->replicas().size() << " for tablet " << tablet_info->id() << ": "
        << locs_pb->ShortDebugString() << THROTTLE_MSG;
  }
  return s;
}

// Overload taking an already-resolved TabletInfo; expects an empty replica list.
Status CatalogManager::GetTabletLocations(
    scoped_refptr<TabletInfo> tablet_info, TabletLocationsPB* locs_pb) {
  DCHECK_EQ(locs_pb->replicas().size(), 0);
  locs_pb->mutable_replicas()->Clear();
  return BuildLocationsForTablet(tablet_info, locs_pb);
}

// RPC handler: returns locations for all tablets of a table in the requested
// partition-key range.
Status CatalogManager::GetTableLocations(
    const GetTableLocationsRequestPB* req,
    GetTableLocationsResponsePB* resp) {
  VLOG(4) << "GetTableLocations: " << req->ShortDebugString();
  // If start-key is > end-key report an error instead of swap the two
  // since probably there is something wrong app-side.
  if (req->has_partition_key_start() && req->has_partition_key_end()
      && req->partition_key_start() > req->partition_key_end()) {
    return STATUS(InvalidArgument, "start partition key is greater than the end partition key");
  }
  if (req->max_returned_locations() <= 0) {
    return STATUS(InvalidArgument, "max_returned_locations must be greater than 0");
  }
  scoped_refptr<TableInfo> table = VERIFY_RESULT(FindTable(req->table()));
  if (table->IsCreateInProgress()) {
    resp->set_creating(true);
  }
  auto l = table->LockForRead();
  RETURN_NOT_OK(CheckIfTableDeletedOrNotVisibleToClient(l, resp));
  vector<scoped_refptr<TabletInfo>> tablets_in_range;
  table->GetTabletsInRange(req, &tablets_in_range);
  // (sic: "runnings" — local variable name kept as-is.)
  bool require_tablets_runnings = req->require_tablets_running();
  int expected_live_replicas = 0;
  int expected_read_replicas = 0;
  GetExpectedNumberOfReplicas(&expected_live_replicas, &expected_read_replicas);
  for (const scoped_refptr<TabletInfo>& tablet : tablets_in_range) {
    TabletLocationsPB* locs_pb = resp->add_tablet_locations();
    locs_pb->set_expected_live_replicas(expected_live_replicas);
    locs_pb->set_expected_read_replicas(expected_read_replicas);
    auto status = BuildLocationsForTablet(tablet, locs_pb);
    if (!status.ok()) {
      // Not running.
      // Either the caller demands running tablets (fail the whole request), or
      // we silently drop the entry just added for this tablet.
      if (require_tablets_runnings) {
        resp->mutable_tablet_locations()->Clear();
        return SetupError(resp->mutable_error(), MasterErrorPB::OBJECT_NOT_FOUND, status);
      }
      resp->mutable_tablet_locations()->RemoveLast();
    }
  }
  resp->set_table_type(l->pb.table_type());
  resp->set_partition_list_version(l->pb.partition_list_version());
  return Status::OK();
}

// Returns the committed Raft config of the sys-catalog tablet.
Status CatalogManager::GetCurrentConfig(consensus::ConsensusStatePB* cpb) const {
  auto tablet_peer = sys_catalog_->tablet_peer();
  auto consensus = tablet_peer ?
      tablet_peer->shared_consensus() : nullptr;
  if (!consensus) {
    std::string uuid = master_->fs_manager()->uuid();
    return STATUS_FORMAT(IllegalState, "Node $0 peer not initialized.", uuid);
  }
  *cpb = consensus->ConsensusState(CONSENSUS_CONFIG_COMMITTED);
  return Status::OK();
}

// Writes a human-readable dump of the catalog (namespaces, tables, tablets,
// orphans, and optionally the on-disk raft config) to 'out'.
void CatalogManager::DumpState(std::ostream* out, bool on_disk_dump) const {
  NamespaceInfoMap namespace_ids_copy;
  TableInfoMap ids_copy;
  TableInfoByNameMap names_copy;
  TabletInfoMap tablets_copy;
  // Copy the internal state so that, if the output stream blocks,
  // we don't end up holding the lock for a long time.
  {
    SharedLock lock(mutex_);
    namespace_ids_copy = namespace_ids_map_;
    ids_copy = *table_ids_map_;
    names_copy = table_names_map_;
    tablets_copy = *tablet_map_;
  }
  *out << "Dumping current state of master.\nNamespaces:\n";
  for (const NamespaceInfoMap::value_type& e : namespace_ids_copy) {
    NamespaceInfo* t = e.second.get();
    auto l = t->LockForRead();
    const NamespaceName& name = l->name();
    *out << t->id() << ":\n";
    *out << " name: \"" << strings::CHexEscape(name) << "\"\n";
    *out << " metadata: " << l->pb.ShortDebugString() << "\n";
  }
  *out << "Tables:\n";
  for (const TableInfoMap::value_type& e : ids_copy) {
    TableInfo* t = e.second.get();
    vector<scoped_refptr<TabletInfo>> table_tablets;
    {
      auto l = t->LockForRead();
      const TableName& name = l->name();
      const NamespaceId& namespace_id = l->namespace_id();
      // Find namespace by its ID.
      scoped_refptr<NamespaceInfo> ns = FindPtrOrNull(namespace_ids_copy, namespace_id);
      *out << t->id() << ":\n";
      *out << " namespace id: \"" << strings::CHexEscape(namespace_id) << "\"\n";
      if (ns != nullptr) {
        *out << " namespace name: \"" << strings::CHexEscape(ns->name()) << "\"\n";
      }
      *out << " name: \"" << strings::CHexEscape(name) << "\"\n";
      // Erase from the map, so later we can check that we don't have
      // any orphaned tables in the by-name map that aren't in the
      // by-id map.
      if (names_copy.erase({namespace_id, name}) != 1) {
        *out << " [not present in by-name map]\n";
      }
      *out << " metadata: " << l->pb.ShortDebugString() << "\n";
      *out << " tablets:\n";
      t->GetAllTablets(&table_tablets);
    }
    // Tablet locks are taken outside the table lock scope above.
    for (const scoped_refptr<TabletInfo>& tablet : table_tablets) {
      auto l_tablet = tablet->LockForRead();
      *out << " " << tablet->tablet_id() << ": " << l_tablet->pb.ShortDebugString() << "\n";
      // Cross-check: every tablet of a table should appear in the global map.
      if (tablets_copy.erase(tablet->tablet_id()) != 1) {
        *out << " [ERROR: not present in CM tablet map!]\n";
      }
    }
  }
  // Whatever survives the erases above is not referenced by any table.
  if (!tablets_copy.empty()) {
    *out << "Orphaned tablets (not referenced by any table):\n";
    for (const TabletInfoMap::value_type& entry : tablets_copy) {
      const scoped_refptr<TabletInfo>& tablet = entry.second;
      auto l_tablet = tablet->LockForRead();
      *out << " " << tablet->tablet_id() << ": " << l_tablet->pb.ShortDebugString() << "\n";
    }
  }
  if (!names_copy.empty()) {
    *out << "Orphaned tables (in by-name map, but not id map):\n";
    for (const TableInfoByNameMap::value_type& e : names_copy) {
      *out << e.second->id() << ":\n";
      *out << " namespace id: \"" << strings::CHexEscape(e.first.first) << "\"\n";
      *out << " name: \"" << CHexEscape(e.first.second) << "\"\n";
    }
  }
  master_->DumpMasterOptionsInfo(out);
  if (on_disk_dump) {
    consensus::ConsensusStatePB cur_consensus_state;
    // TODO: proper error handling below.
    CHECK_OK(GetCurrentConfig(&cur_consensus_state));
    *out << "Current raft config: " << cur_consensus_state.ShortDebugString() << "\n";
  }
}

// Asks each master peer to dump its state via RPC; optionally concatenates the
// string dumps into 'resp'. Stops at the first peer that reports an error.
Status CatalogManager::PeerStateDump(const vector<RaftPeerPB>& peers,
                                     const DumpMasterStateRequestPB* req,
                                     DumpMasterStateResponsePB* resp) {
  std::unique_ptr<MasterServiceProxy> peer_proxy;
  Endpoint sockaddr;
  MonoTime timeout = MonoTime::Now();
  DumpMasterStateRequestPB peer_req;
  rpc::RpcController rpc;
  // One shared deadline for the whole fan-out; Reset() below keeps it.
  timeout.AddDelta(MonoDelta::FromMilliseconds(FLAGS_master_ts_rpc_timeout_ms));
  rpc.set_deadline(timeout);
  peer_req.set_on_disk(req->on_disk());
  peer_req.set_return_dump_as_string(req->return_dump_as_string());
  string dump;
  for (const RaftPeerPB& peer : peers) {
    HostPort hostport = HostPortFromPB(DesiredHostPort(peer, master_->MakeCloudInfoPB()));
    peer_proxy.reset(new MasterServiceProxy(&master_->proxy_cache(), hostport));
    DumpMasterStateResponsePB peer_resp;
    rpc.Reset();
    RETURN_NOT_OK(peer_proxy->DumpState(peer_req, &peer_resp, &rpc));
    if (peer_resp.has_error()) {
      LOG(WARNING) << "Hit err " << peer_resp.ShortDebugString() << " during peer "
                   << peer.ShortDebugString() << " state dump.";
      return StatusFromPB(peer_resp.error().status());
    } else if (req->return_dump_as_string()) {
      dump += peer_resp.dump();
    }
  }
  if (req->return_dump_as_string()) {
    resp->set_dump(resp->dump() + dump);
  }
  return Status::OK();
}

void CatalogManager::ReportMetrics() {
  // Report metrics on how many tservers are alive.
  TSDescriptorVector ts_descs;
  master_->ts_manager()->GetAllLiveDescriptors(&ts_descs);
  const int32 num_live_servers = ts_descs.size();
  metric_num_tablet_servers_live_->set_value(num_live_servers);
  // Dead = all registered descriptors minus the live ones counted above.
  master_->ts_manager()->GetAllDescriptors(&ts_descs);
  metric_num_tablet_servers_dead_->set_value(ts_descs.size() - num_live_servers);
}

// Zeroes the live/dead tserver gauges (e.g. on leadership loss).
void CatalogManager::ResetMetrics() {
  metric_num_tablet_servers_live_->set_value(0);
  metric_num_tablet_servers_dead_->set_value(0);
}

// Log prefix: sys-catalog tablet id + this master's uuid (with a fallback
// when the tablet peer is not yet available).
std::string CatalogManager::LogPrefix() const {
  if (tablet_peer()) {
    return consensus::MakeTabletLogPrefix(
        tablet_peer()->tablet_id(), tablet_peer()->permanent_uuid());
  } else {
    return consensus::MakeTabletLogPrefix(
        kSysCatalogTabletId, master_->fs_manager()->uuid());
  }
}

void CatalogManager::SetLoadBalancerEnabled(bool is_enabled) {
  load_balance_policy_->SetLoadBalancerEnabled(is_enabled);
}

bool CatalogManager::IsLoadBalancerEnabled() {
  return load_balance_policy_->IsLoadBalancerEnabled();
}

MonoDelta CatalogManager::TimeSinceElectedLeader() {
  return MonoTime::Now() - time_elected_leader_;
}

// Puts the master into shell mode: detaches the sys catalog and stops
// background tasks. Fails if already in shell mode.
Status CatalogManager::GoIntoShellMode() {
  if (master_->IsShellMode()) {
    return STATUS(IllegalState, "Master is already in shell mode.");
  }
  LOG(INFO) << "Starting going into shell mode.";
  master_->SetShellMode(true);
  {
    LockGuard lock(mutex_);
    RETURN_NOT_OK(sys_catalog_->GoIntoShellMode());
    background_tasks_->Shutdown();
    background_tasks_.reset();
  }
  {
    std::lock_guard<std::mutex> l(remote_bootstrap_mtx_);
    tablet_exists_ = false;
  }
  LOG(INFO) << "Done going into shell mode.";
  return Status::OK();
}

Status CatalogManager::GetClusterConfig(GetMasterClusterConfigResponsePB* resp) {
  return GetClusterConfig(resp->mutable_cluster_config());
}

// Copies the current cluster config protobuf under a read lock.
Status CatalogManager::GetClusterConfig(SysClusterConfigEntryPB* config) {
  DCHECK(cluster_config_) << "Missing cluster config for master!";
  auto l = cluster_config_->LockForRead();
  *config = l->pb;
  return Status::OK();
}

// RPC handler: validates and persists a new cluster config (version-checked
// compare-and-swap; continues on the following chunk).
Status CatalogManager::SetClusterConfig(
    const
    ChangeMasterClusterConfigRequestPB* req, ChangeMasterClusterConfigResponsePB* resp) {
  SysClusterConfigEntryPB config(req->cluster_config());
  // Record the replica/leader load present at the moment each blacklist is
  // installed, so progress can be tracked against it later.
  if (config.has_server_blacklist()) {
    config.mutable_server_blacklist()->set_initial_replica_load(
        GetNumRelevantReplicas(config.server_blacklist(), false /* leaders_only */));
    LOG(INFO) << Format("Set blacklist of total tservers: $0, with initial load: $1",
                        config.server_blacklist().hosts().size(),
                        config.server_blacklist().initial_replica_load());
  }
  if (config.has_leader_blacklist()) {
    config.mutable_leader_blacklist()->set_initial_leader_load(
        GetNumRelevantReplicas(config.leader_blacklist(), true /* leaders_only */));
    LOG(INFO) << Format("Set leader blacklist of total tservers: $0, with initial load: $1",
                        config.leader_blacklist().hosts().size(),
                        config.leader_blacklist().initial_leader_load());
  }
  auto l = cluster_config_->LockForWrite();
  // We should only set the config, if the caller provided us with a valid update to the
  // existing config.
  if (l->pb.version() != config.version()) {
    Status s = STATUS_SUBSTITUTE(IllegalState,
        "Config version does not match, got $0, but most recent one is $1. Should call Get again",
        config.version(), l->pb.version());
    return SetupError(resp->mutable_error(), MasterErrorPB::CONFIG_VERSION_MISMATCH, s);
  }
  if (config.cluster_uuid() != l->pb.cluster_uuid()) {
    Status s = STATUS(InvalidArgument, "Config cluster UUID cannot be updated");
    return SetupError(resp->mutable_error(), MasterErrorPB::INVALID_CLUSTER_CONFIG, s);
  }
  // TODO(bogdan): should this live here?
const ReplicationInfoPB& replication_info = config.replication_info(); for (int i = 0; i < replication_info.read_replicas_size(); i++) { if (!replication_info.read_replicas(i).has_placement_uuid()) { Status s = STATUS(IllegalState, "All read-only clusters must have a placement uuid specified"); return SetupError(resp->mutable_error(), MasterErrorPB::INVALID_CLUSTER_CONFIG, s); } } // Validate placement information according to rules defined. if (replication_info.has_live_replicas()) { Status s = CatalogManagerUtil::IsPlacementInfoValid(replication_info.live_replicas()); if (!s.ok()) { return SetupError(resp->mutable_error(), MasterErrorPB::INVALID_CLUSTER_CONFIG, s); } } l.mutable_data()->pb.CopyFrom(config); // Bump the config version, to indicate an update. l.mutable_data()->pb.set_version(config.version() + 1); LOG(INFO) << "Updating cluster config to " << config.version() + 1; RETURN_NOT_OK(sys_catalog_->Upsert(leader_ready_term(), cluster_config_)); l.Commit(); return Status::OK(); } Status CatalogManager::SetPreferredZones( const SetPreferredZonesRequestPB* req, SetPreferredZonesResponsePB* resp) { auto l = cluster_config_->LockForWrite(); auto replication_info = l.mutable_data()->pb.mutable_replication_info(); replication_info->clear_affinitized_leaders(); Status s; for (const auto& cloud_info : req->preferred_zones()) { s = CatalogManagerUtil::DoesPlacementInfoContainCloudInfo(replication_info->live_replicas(), cloud_info); if (!s.ok()) { return SetupError(resp->mutable_error(), MasterErrorPB::INVALID_CLUSTER_CONFIG, s); } *replication_info->add_affinitized_leaders() = cloud_info; } l.mutable_data()->pb.set_version(l.mutable_data()->pb.version() + 1); LOG(INFO) << "Updating cluster config to " << l.mutable_data()->pb.version(); s = sys_catalog_->Upsert(leader_ready_term(), cluster_config_);; if (!s.ok()) { return SetupError(resp->mutable_error(), MasterErrorPB::INVALID_CLUSTER_CONFIG, s); } l.Commit(); return Status::OK(); } Status 
CatalogManager::GetReplicationFactor(int* num_replicas) { DCHECK(cluster_config_) << "Missing cluster config for master!"; auto l = cluster_config_->LockForRead(); const ReplicationInfoPB& replication_info = l->pb.replication_info(); *num_replicas = GetNumReplicasFromPlacementInfo(replication_info.live_replicas()); return Status::OK(); } Status CatalogManager::GetReplicationFactorForTablet(const scoped_refptr<TabletInfo>& tablet, int* num_replicas) { // For system tables, the set of replicas is always the set of masters. if (system_tablets_.find(tablet->id()) != system_tablets_.end()) { consensus::ConsensusStatePB master_consensus; RETURN_NOT_OK(GetCurrentConfig(&master_consensus)); *num_replicas = master_consensus.config().peers().size(); return Status::OK(); } int num_live_replicas = 0, num_read_replicas = 0; GetExpectedNumberOfReplicas(&num_live_replicas, &num_read_replicas); *num_replicas = num_live_replicas + num_read_replicas; return Status::OK(); } void CatalogManager::GetExpectedNumberOfReplicas(int* num_live_replicas, int* num_read_replicas) { auto l = cluster_config_->LockForRead(); const ReplicationInfoPB& replication_info = l->pb.replication_info(); *num_live_replicas = GetNumReplicasFromPlacementInfo(replication_info.live_replicas()); for (const auto& read_replica_placement_info : replication_info.read_replicas()) { *num_read_replicas += read_replica_placement_info.num_replicas(); } } string CatalogManager::placement_uuid() const { DCHECK(cluster_config_) << "Missing cluster config for master!"; auto l = cluster_config_->LockForRead(); const ReplicationInfoPB& replication_info = l->pb.replication_info(); return replication_info.live_replicas().placement_uuid(); } Status CatalogManager::IsLoadBalanced(const IsLoadBalancedRequestPB* req, IsLoadBalancedResponsePB* resp) { if (req->has_expected_num_servers()) { TSDescriptorVector ts_descs; master_->ts_manager()->GetAllLiveDescriptors(&ts_descs); if (req->expected_num_servers() > ts_descs.size()) { Status s 
= STATUS_SUBSTITUTE(IllegalState, "Found $0, which is below the expected number of servers $1.", ts_descs.size(), req->expected_num_servers()); return SetupError(resp->mutable_error(), MasterErrorPB::CAN_RETRY_LOAD_BALANCE_CHECK, s); } } Status s = load_balance_policy_->IsIdle(); if (!s.ok()) { return SetupError(resp->mutable_error(), MasterErrorPB::CAN_RETRY_LOAD_BALANCE_CHECK, s); } return Status::OK(); } Status CatalogManager::IsLoadBalancerIdle(const IsLoadBalancerIdleRequestPB* req, IsLoadBalancerIdleResponsePB* resp) { Status s = load_balance_policy_->IsIdle(); if (!s.ok()) { return SetupError(resp->mutable_error(), MasterErrorPB::LOAD_BALANCER_RECENTLY_ACTIVE, s); } return Status::OK(); } Status CatalogManager::AreLeadersOnPreferredOnly(const AreLeadersOnPreferredOnlyRequestPB* req, AreLeadersOnPreferredOnlyResponsePB* resp) { // If we have cluster replication info, then only fetch live tservers (ignore read replicas). TSDescriptorVector ts_descs; string live_replicas_placement_uuid = ""; { auto l = cluster_config_->LockForRead(); const ReplicationInfoPB& cluster_replication_info = l->pb.replication_info(); if (cluster_replication_info.has_live_replicas()) { live_replicas_placement_uuid = cluster_replication_info.live_replicas().placement_uuid(); } } { BlacklistSet blacklist = BlacklistSetFromPB(); if (live_replicas_placement_uuid.empty()) { master_->ts_manager()->GetAllLiveDescriptors(&ts_descs, blacklist); } else { master_->ts_manager()->GetAllLiveDescriptorsInCluster( &ts_descs, live_replicas_placement_uuid, blacklist); } } // Only need to fetch if txn tables are not using preferred zones. 
vector<TableInfoPtr> tables; if (!FLAGS_transaction_tables_use_preferred_zones) { tables = master_->catalog_manager()->GetTables(GetTablesMode::kRunning); } auto l = cluster_config_->LockForRead(); Status s = CatalogManagerUtil::AreLeadersOnPreferredOnly( ts_descs, l->pb.replication_info(), tables); if (!s.ok()) { return SetupError( resp->mutable_error(), MasterErrorPB::CAN_RETRY_ARE_LEADERS_ON_PREFERRED_ONLY_CHECK, s); } return Status::OK(); } int64_t CatalogManager::GetNumRelevantReplicas(const BlacklistPB& blacklist, bool leaders_only) { int64_t res = 0; SharedLock lock(mutex_); for (const TabletInfoMap::value_type& entry : *tablet_map_) { scoped_refptr<TabletInfo> tablet = entry.second; auto l = tablet->LockForRead(); // Not checking being created on purpose as we do not want initial load to be under accounted. if (!tablet->table() || PREDICT_FALSE(l->is_deleted())) { continue; } auto locs = tablet->GetReplicaLocations(); for (const TabletInfo::ReplicaMap::value_type& replica : *locs) { if (leaders_only && replica.second.role != RaftPeerPB::LEADER) { continue; } for (int i = 0; i < blacklist.hosts_size(); i++) { if (replica.second.ts_desc->IsRunningOn(blacklist.hosts(i))) { ++res; break; } } } } return res; } Status CatalogManager::FillHeartbeatResponse(const TSHeartbeatRequestPB* req, TSHeartbeatResponsePB* resp) { return Status::OK(); } Status CatalogManager::GetLoadMoveCompletionPercent(GetLoadMovePercentResponsePB* resp) { return GetLoadMoveCompletionPercent(resp, false); } Status CatalogManager::GetLeaderBlacklistCompletionPercent(GetLoadMovePercentResponsePB* resp) { return GetLoadMoveCompletionPercent(resp, true); } Status CatalogManager::GetLoadMoveCompletionPercent(GetLoadMovePercentResponsePB* resp, bool blacklist_leader) { auto l = cluster_config_->LockForRead(); // Fine to pass in empty defaults if server_blacklist or leader_blacklist is not filled. const BlacklistPB& state = blacklist_leader ? 
l->pb.leader_blacklist() : l->pb.server_blacklist(); int64_t blacklist_replicas = GetNumRelevantReplicas(state, blacklist_leader); int64_t initial_load = (blacklist_leader) ? state.initial_leader_load(): state.initial_replica_load(); // If we are starting up and don't find any load on the tservers, return progress as 0. // We expect that by blacklist_progress_initial_delay_secs time, this should go away and if the // load is reported as 0 on the blacklisted tservers after this time then it means that // the transfer is successfully complete. if (blacklist_replicas == 0 && TimeSinceElectedLeader() <= MonoDelta::FromSeconds(FLAGS_blacklist_progress_initial_delay_secs)) { LOG(INFO) << "Master leadership has changed. Reporting progress as 0 until the catalog " << "manager gets the correct estimates of the remaining load on the blacklisted" << "tservers."; resp->set_percent(0); resp->set_total(initial_load); resp->set_remaining(initial_load); return Status::OK(); } // On change of master leader, initial_load_ information may be lost temporarily. Reset to // current value to avoid reporting progress percent as 100. Note that doing so will report // progress percent as 0 instead. // TODO(Sanket): This might be no longer relevant after we persist and load the initial load // on failover. Need to investigate. if (initial_load < blacklist_replicas) { LOG(INFO) << Format("Initial load: $0, current load: $1." " Initial load is less than the current load. Probably a master leader change." " Reporting progress as 0", state.initial_replica_load(), blacklist_replicas); initial_load = blacklist_replicas; } LOG(INFO) << "Blacklisted count " << blacklist_replicas << " across " << state.hosts_size() << " servers, with initial load " << initial_load; // Case when a blacklisted servers did not have any starting load. 
if (initial_load == 0) { resp->set_percent(100); return Status::OK(); } resp->set_percent( 100 - (static_cast<double>(blacklist_replicas) * 100 / initial_load)); resp->set_remaining(blacklist_replicas); resp->set_total(initial_load); return Status::OK(); } void CatalogManager::AbortAndWaitForAllTasks(const vector<scoped_refptr<TableInfo>>& tables) { for (const auto& t : tables) { VLOG(1) << "Aborting tasks for table " << t->ToString(); t->AbortTasksAndClose(); } for (const auto& t : tables) { VLOG(1) << "Waiting on Aborting tasks for table " << t->ToString(); t->WaitTasksCompletion(); } VLOG(1) << "Waiting on Aborting tasks done"; } void CatalogManager::HandleNewTableId(const TableId& table_id) { if (table_id == kPgProcTableId) { // Needed to track whether initdb has started running. pg_proc_exists_.store(true, std::memory_order_release); } } scoped_refptr<TableInfo> CatalogManager::NewTableInfo(TableId id) { return make_scoped_refptr<TableInfo>(id, tasks_tracker_); } Status CatalogManager::ScheduleTask(std::shared_ptr<RetryingTSRpcTask> task) { Status s = async_task_pool_->SubmitFunc([task]() { WARN_NOT_OK(task->Run(), "Failed task"); }); // If we are not able to enqueue, abort the task. if (!s.ok()) { task->AbortAndReturnPrevState(s); } return s; } Status CatalogManager::CollectTable( const TableDescription& table_description, CollectFlags flags, std::vector<TableDescription>* all_tables, std::unordered_set<NamespaceId>* parent_colocated_table_ids) { auto lock = table_description.table_info->LockForRead(); if (lock->started_hiding()) { VLOG_WITH_PREFIX_AND_FUNC(4) << "Rejected hidden table: " << AsString(table_description.table_info); return Status::OK(); } if (lock->started_deleting()) { VLOG_WITH_PREFIX_AND_FUNC(4) << "Rejected deleted table: " << AsString(table_description.table_info); return Status::OK(); } if (flags.Test(CollectFlag::kIncludeParentColocatedTable) && lock->pb.colocated()) { // If a table is colocated, add its parent colocated table as well. 
const auto parent_table_id = table_description.namespace_info->id() + kColocatedParentTableIdSuffix; auto result = parent_colocated_table_ids->insert(parent_table_id); if (result.second) { // We have not processed this parent table id yet, so do that now. TableIdentifierPB parent_table_pb; parent_table_pb.set_table_id(parent_table_id); parent_table_pb.mutable_namespace_()->set_id(table_description.namespace_info->id()); all_tables->push_back(VERIFY_RESULT(DescribeTable( parent_table_pb, flags.Test(CollectFlag::kSucceedIfCreateInProgress)))); } } all_tables->push_back(table_description); if (flags.Test(CollectFlag::kAddIndexes)) { TRACE(Substitute("Locking object with id $0", table_description.table_info->id())); if (lock->is_index()) { return STATUS(InvalidArgument, "Expected table, but found index", table_description.table_info->id(), MasterError(MasterErrorPB::INVALID_TABLE_TYPE)); } if (lock->table_type() == PGSQL_TABLE_TYPE) { return STATUS(InvalidArgument, "Getting indexes for YSQL table is not supported", table_description.table_info->id(), MasterError(MasterErrorPB::INVALID_TABLE_TYPE)); } auto collect_index_flags = flags; // Don't need to collect indexes for index. 
collect_index_flags.Reset(CollectFlag::kAddIndexes); for (const auto& index_info : lock->pb.indexes()) { LOG_IF(DFATAL, table_description.table_info->id() != index_info.indexed_table_id()) << "Wrong indexed table id in index descriptor"; TableIdentifierPB index_id_pb; index_id_pb.set_table_id(index_info.table_id()); index_id_pb.mutable_namespace_()->set_id(table_description.namespace_info->id()); auto index_description = VERIFY_RESULT(DescribeTable( index_id_pb, flags.Test(CollectFlag::kSucceedIfCreateInProgress))); RETURN_NOT_OK(CollectTable( index_description, collect_index_flags, all_tables, parent_colocated_table_ids)); } } return Status::OK(); } Result<vector<TableDescription>> CatalogManager::CollectTables( const google::protobuf::RepeatedPtrField<TableIdentifierPB>& table_identifiers, CollectFlags flags, std::unordered_set<NamespaceId>* namespaces) { std::vector<std::pair<TableInfoPtr, CollectFlags>> table_with_flags; { SharedLock lock(mutex_); for (const auto& table_id_pb : table_identifiers) { if (table_id_pb.table_name().empty() && table_id_pb.table_id().empty() && table_id_pb.has_namespace_()) { auto namespace_info = FindNamespaceUnlocked(table_id_pb.namespace_()); if (!namespace_info.ok()) { if (namespace_info.status().IsNotFound()) { continue; } return namespace_info.status(); } if (namespaces) { namespaces->insert((**namespace_info).id()); } auto ns_collect_flags = flags; // Don't collect indexes, since they should be in the same namespace and will be collected // as regular tables. // It is necessary because we don't support kAddIndexes for YSQL tables. 
ns_collect_flags.Reset(CollectFlag::kAddIndexes); VLOG_WITH_PREFIX_AND_FUNC(1) << "Collecting all tables from: " << (**namespace_info).ToString() << ", specified as: " << table_id_pb.namespace_().ShortDebugString(); for (const auto& id_and_table : *table_ids_map_) { if (id_and_table.second->is_system()) { VLOG_WITH_PREFIX_AND_FUNC(4) << "Rejected system table: " << AsString(id_and_table); continue; } auto lock = id_and_table.second->LockForRead(); if (lock->namespace_id() != (**namespace_info).id()) { VLOG_WITH_PREFIX_AND_FUNC(4) << "Rejected table from other namespace: " << AsString(id_and_table); continue; } VLOG_WITH_PREFIX_AND_FUNC(4) << "Accepted: " << AsString(id_and_table); table_with_flags.emplace_back(id_and_table.second, ns_collect_flags); } } else { auto table = VERIFY_RESULT(FindTableUnlocked(table_id_pb)); VLOG_WITH_PREFIX_AND_FUNC(1) << "Collecting table: " << table->ToString(); table_with_flags.emplace_back(table, flags); } } } std::sort(table_with_flags.begin(), table_with_flags.end(), [](const auto& p1, const auto& p2) { return p1.first->id() < p2.first->id(); }); std::vector<TableDescription> all_tables; std::unordered_set<NamespaceId> parent_colocated_table_ids; const TableId* table_id = nullptr; for (auto& table_and_flags : table_with_flags) { if (table_id && *table_id == table_and_flags.first->id()) { return STATUS_FORMAT(InternalError, "Table collected twice $0", *table_id); } auto description = VERIFY_RESULT(DescribeTable( table_and_flags.first, table_and_flags.second.Test(CollectFlag::kSucceedIfCreateInProgress))); RETURN_NOT_OK(CollectTable( description, table_and_flags.second, &all_tables, &parent_colocated_table_ids)); table_id = &table_and_flags.first->id(); } return all_tables; } Result<std::vector<TableDescription>> CatalogManager::CollectTables( const google::protobuf::RepeatedPtrField<TableIdentifierPB>& table_identifiers, bool add_indexes, bool include_parent_colocated_table) { CollectFlags flags; 
flags.SetIf(CollectFlag::kAddIndexes, add_indexes); flags.SetIf(CollectFlag::kIncludeParentColocatedTable, include_parent_colocated_table); return CollectTables(table_identifiers, flags); } Status CatalogManager::GetYQLPartitionsVTable(std::shared_ptr<SystemTablet>* tablet) { scoped_refptr<TableInfo> table = FindPtrOrNull(table_names_map_, std::make_pair(kSystemNamespaceId, kSystemPartitionsTableName)); SCHECK(table != nullptr, NotFound, "YQL system.partitions table not found"); vector<scoped_refptr<TabletInfo>> tablets; table->GetAllTablets(&tablets); SCHECK(tablets.size() == 1, NotFound, "YQL system.partitions tablet not found"); *tablet = std::dynamic_pointer_cast<SystemTablet>( VERIFY_RESULT(GetSystemTablet(tablets[0]->tablet_id()))); return Status::OK(); } void CatalogManager::RebuildYQLSystemPartitions() { if (FLAGS_partitions_vtable_cache_refresh_secs > 0) { SCOPED_LEADER_SHARED_LOCK(l, this); if (l.catalog_status().ok() && l.leader_status().ok()) { if (system_partitions_tablet_ != nullptr) { auto s = ResultToStatus(down_cast<const YQLPartitionsVTable&>( system_partitions_tablet_->QLStorage()).GenerateAndCacheData()); if (!s.ok()) { LOG(ERROR) << "Error rebuilding system.partitions: " << s.ToString(); } } else { LOG(ERROR) << "Error finding system.partitions vtable."; } } } auto wait_time = FLAGS_partitions_vtable_cache_refresh_secs * 1s; if (wait_time <= 0s) { wait_time = kDefaultYQLPartitionsRefreshBgTaskSleep; } refresh_yql_partitions_task_.Schedule([this](const Status& status) { WARN_NOT_OK( background_tasks_thread_pool_->SubmitFunc([this]() { RebuildYQLSystemPartitions(); }), "Failed to schedule: RebuildYQLSystemPartitions"); }, wait_time); } Status CatalogManager::SysCatalogRespectLeaderAffinity() { auto l = cluster_config_->LockForRead(); const auto& affinitized_leaders = l->pb.replication_info().affinitized_leaders(); if (affinitized_leaders.empty()) { return Status::OK(); } for (const CloudInfoPB& cloud_info : affinitized_leaders) { // Do nothing if 
already in an affinitized zone. if (CatalogManagerUtil::IsCloudInfoEqual(cloud_info, server_registration_.cloud_info())) { return Status::OK(); } } // Not in affinitized zone, try finding a master to send a step down request to. std::vector<ServerEntryPB> masters; RETURN_NOT_OK(master_->ListMasters(&masters)); for (const ServerEntryPB& master : masters) { auto master_cloud_info = master.registration().cloud_info(); for (const CloudInfoPB& config_cloud_info : affinitized_leaders) { if (CatalogManagerUtil::IsCloudInfoEqual(config_cloud_info, master_cloud_info)) { if (PREDICT_FALSE( GetAtomicFlag(&FLAGS_TEST_crash_server_on_sys_catalog_leader_affinity_move))) { LOG_WITH_PREFIX(FATAL) << "For test: Crashing the server instead of performing sys " "catalog leader affinity move."; } YB_LOG_WITH_PREFIX_EVERY_N_SECS(INFO, 10) << "Sys catalog tablet is not in an affinitized zone, " << "sending step down request to master uuid " << master.instance_id().permanent_uuid() << " in zone " << TSDescriptor::generate_placement_id(master_cloud_info); std::shared_ptr<TabletPeer> tablet_peer; RETURN_NOT_OK(GetTabletPeer(sys_catalog_->tablet_id(), &tablet_peer)); consensus::LeaderStepDownRequestPB req; req.set_tablet_id(sys_catalog_->tablet_id()); req.set_dest_uuid(sys_catalog_->tablet_peer()->permanent_uuid()); req.set_new_leader_uuid(master.instance_id().permanent_uuid()); consensus::LeaderStepDownResponsePB resp; RETURN_NOT_OK(tablet_peer->consensus()->StepDown(&req, &resp)); if (resp.has_error()) { YB_LOG_WITH_PREFIX_EVERY_N_SECS(INFO, 10) << "Step down failed: " << resp.error().status().message(); break; } LOG_WITH_PREFIX(INFO) << "Successfully stepped down to new master"; return Status::OK(); } } } return STATUS(NotFound, "Couldn't step down to a master in an affinitized zone"); } BlacklistSet CatalogManager::BlacklistSetFromPB() const { auto l = cluster_config_->LockForRead(); const auto& blacklist_pb = l->pb.server_blacklist(); BlacklistSet blacklist_set; for (int i = 0; i < 
blacklist_pb.hosts_size(); i++) { blacklist_set.insert(HostPortFromPB(blacklist_pb.hosts(i))); } return blacklist_set; } void CatalogManager::ProcessTabletStorageMetadata( const std::string& ts_uuid, const TabletDriveStorageMetadataPB& storage_metadata) { const string& tablet_id = storage_metadata.tablet_id(); scoped_refptr<TabletInfo> tablet; { SharedLock lock(mutex_); tablet = FindPtrOrNull(*tablet_map_, tablet_id); } if (!tablet) { VLOG(1) << Format("Tablet $0 not found on ts $1", tablet_id, ts_uuid); return; } TabletReplicaDriveInfo drive_info{ storage_metadata.sst_file_size(), storage_metadata.wal_file_size(), storage_metadata.uncompressed_sst_file_size(), storage_metadata.may_have_orphaned_post_split_data()}; tablet->UpdateReplicaDriveInfo(ts_uuid, drive_info); WARN_NOT_OK( tablet_split_manager_.ProcessLiveTablet(*tablet, ts_uuid, drive_info), "Failed to process tablet split candidate."); } void CatalogManager::CheckTableDeleted(const TableInfoPtr& table) { if (!FLAGS_master_drop_table_after_task_response) { return; } // Since this is called after every successful async DeleteTablet, it's possible if all tasks // complete, for us to mark the table as DELETED/HIDDEN asap. This is desirable as clients will // wait for this before returning success to the user. // // However, if tasks fail, timeout, or are aborted, we still have the background thread as a // catch all. auto lock = MaybeTransitionTableToDeleted(table); if (!lock.locked()) { return; } Status s = sys_catalog_->Upsert(leader_ready_term(), table);; if (!s.ok()) { LOG_WITH_PREFIX(WARNING) << "Error marking table as " << (table->LockForRead()->started_deleting() ? "DELETED" : "HIDDEN") << ": " << s; return; } lock.Commit(); } } // namespace master } // namespace yb
#pragma once #include "msqlite/for_each.hpp" #include "msqlite/throws/value_or_throw.hpp" namespace msqlite::throws { template<typename F> inline void for_each(stmt& stmt, F&& f) { return value_or_throw(::msqlite::for_each(stmt, std::forward<F>(f))); } }
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2014 The Bitcoin developers
// Copyright (c) 2015-2018 The ALG developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include "primitives/block.h"
#include "primitives/transaction.h"

#include "chain.h"
#include "hash.h"
#include "main.h"
#include "tinyformat.h"
#include "utilstrencodings.h"
#include "transaction.h"

#include <boost/foreach.hpp>

// Declared here rather than in a header; implemented elsewhere in the codebase.
extern bool GetTransaction(const uint256 &hash, CTransaction &txOut, uint256 &hashBlock, bool fAllowSlow);

// Full debug representation of an outpoint (full txid, not truncated).
std::string COutPoint::ToString() const
{
    return strprintf("COutPoint(%s, %u)", hash.ToString()/*.substr(0,10)*/, n);
}

// Compact "txid-n" form used in user-facing listings.
std::string COutPoint::ToStringShort() const
{
    return strprintf("%s-%u", hash.ToString().substr(0,64), n);
}

// Hash over the serialized (txid, n) pair.
// NOTE(review): non-const, unlike the other accessors — presumably an oversight;
// confirm before changing since it is part of the declared interface.
uint256 COutPoint::GetHash()
{
    return Hash(BEGIN(hash), END(hash), BEGIN(n), END(n));
}

CTxIn::CTxIn(COutPoint prevoutIn, CScript scriptSigIn, uint32_t nSequenceIn)
{
    prevout = prevoutIn;
    scriptSig = scriptSigIn;
    nSequence = nSequenceIn;
}

CTxIn::CTxIn(uint256 hashPrevTx, uint32_t nOut, CScript scriptSigIn, uint32_t nSequenceIn)
{
    prevout = COutPoint(hashPrevTx, nOut);
    scriptSig = scriptSigIn;
    nSequence = nSequenceIn;
}

// Builds a zerocoin-spend input: the serialized CoinSpend proof is embedded in
// scriptSig after OP_ZEROCOINSPEND, prevout is null, and the denomination is
// smuggled through the nSequence field (see GetZerocoinSpent below).
CTxIn::CTxIn(const libzerocoin::CoinSpend& spend, libzerocoin::CoinDenomination denom)
{
    //Serialize the coinspend object and append it to a CScript
    CDataStream serializedCoinSpend(SER_NETWORK, PROTOCOL_VERSION);
    serializedCoinSpend << spend;
    std::vector<unsigned char> data(serializedCoinSpend.begin(), serializedCoinSpend.end());
    scriptSig = CScript() << OP_ZEROCOINSPEND << data.size();
    scriptSig.insert(scriptSig.end(), data.begin(), data.end());
    prevout.SetNull();
    nSequence = denom;
}

// Human-readable input summary.
// NOTE(review): the nested if/else relies on dangling-else association — the
// first `else` binds to the inner `if`, the second to the outer. Braces would
// make this safer but are omitted here to keep the code byte-identical.
std::string CTxIn::ToString() const
{
    std::string str;
    str += "CTxIn(";
    str += prevout.ToString();
    if (prevout.IsNull())
        if(scriptSig.IsZerocoinSpend())
            str += strprintf(", zerocoinspend %s...", HexStr(scriptSig).substr(0, 25));
        else
            str += strprintf(", coinbase %s", HexStr(scriptSig));
    else
        str += strprintf(", scriptSig=%s", scriptSig.ToString().substr(0,24));
    if (nSequence != std::numeric_limits<unsigned int>::max())
        str += strprintf(", nSequence=%u", nSequence);
    str += ")";
    return str;
}

CTxOut::CTxOut(const CAmount& nValueIn, CScript scriptPubKeyIn)
{
    nValue = nValueIn;
    scriptPubKey = scriptPubKeyIn;
    nRounds = -10;  // sentinel meaning "obfuscation rounds unknown" — TODO confirm
}

// True if this outpoint is the masternode payout of a coinstake tx: by
// convention the last vout, paying a different script than the staker's (vout[1]).
bool COutPoint::IsMasternodeReward(const CTransaction* tx) const
{
    if(!tx->IsCoinStake())
        return false;
    return (n == tx->vout.size() - 1) && (tx->vout[1].scriptPubKey != tx->vout[n].scriptPubKey);
}

uint256 CTxOut::GetHash() const
{
    return SerializeHash(*this);
}

std::string CTxOut::ToString() const
{
    return strprintf("CTxOut(nValue=%d.%08d, scriptPubKey=%s)",
                     nValue / COIN, nValue % COIN, scriptPubKey.ToString().substr(0,30));
}

CMutableTransaction::CMutableTransaction() : nVersion(CTransaction::CURRENT_VERSION), nLockTime(0) {}

CMutableTransaction::CMutableTransaction(const CTransaction& tx) :
    nVersion(tx.nVersion), vin(tx.vin), vout(tx.vout), nLockTime(tx.nLockTime) {}

// Hash of the serialized mutable transaction (recomputed on every call, unlike
// the cached CTransaction::hash).
uint256 CMutableTransaction::GetHash() const
{
    return SerializeHash(*this);
}

std::string CMutableTransaction::ToString() const
{
    std::string str;
    str += strprintf("CMutableTransaction(ver=%d, vin.size=%u, vout.size=%u, nLockTime=%u)\n",
        nVersion, vin.size(), vout.size(), nLockTime);
    for (unsigned int i = 0; i < vin.size(); i++)
        str += "    " + vin[i].ToString() + "\n";
    for (unsigned int i = 0; i < vout.size(); i++)
        str += "    " + vout[i].ToString() + "\n";
    return str;
}

// Recomputes the cached txid. const_cast works around the const-member caching
// idiom used throughout this class.
void CTransaction::UpdateHash() const
{
    *const_cast<uint256*>(&hash) = SerializeHash(*this);
}

CTransaction::CTransaction() :
    hash(), nVersion(CTransaction::CURRENT_VERSION), vin(), vout(), nLockTime(0) { }

CTransaction::CTransaction(const CMutableTransaction &tx) :
    nVersion(tx.nVersion), vin(tx.vin), vout(tx.vout), nLockTime(tx.nLockTime)
{
    UpdateHash();
}

// Assignment must const_cast because all data members are declared const to
// make a constructed CTransaction immutable.
CTransaction& CTransaction::operator=(const CTransaction &tx)
{
    *const_cast<int*>(&nVersion) = tx.nVersion;
    *const_cast<std::vector<CTxIn>*>(&vin) = tx.vin;
    *const_cast<std::vector<CTxOut>*>(&vout) = tx.vout;
    *const_cast<unsigned int*>(&nLockTime) = tx.nLockTime;
    *const_cast<uint256*>(&hash) = tx.hash;
    return *this;
}

// Proof-of-stake marker: first input spends a real (or zerocoin) prevout and the
// first output is deliberately empty.
bool CTransaction::IsCoinStake() const
{
    if (vin.empty())
        return false;
    // ppcoin: the coin stake transaction is marked with the first output empty
    bool fAllowNull = vin[0].scriptSig.IsZerocoinSpend();
    if (vin[0].prevout.IsNull() && !fAllowNull)
        return false;
    return (vin.size() > 0 && vout.size() >= 2 && vout[0].IsEmpty());
}

// Sum of all output values; throws on negative values or signed wrap-around.
CAmount CTransaction::GetValueOut() const
{
    CAmount nValueOut = 0;
    for (std::vector<CTxOut>::const_iterator it(vout.begin()); it != vout.end(); ++it)
    {
        // ALG: previously MoneyRange() was called here. This has been replaced with negative check and boundary wrap check.
        if (it->nValue < 0)
            throw std::runtime_error("CTransaction::GetValueOut() : value out of range : less than 0");
        if ((nValueOut + it->nValue) < nValueOut)
            throw std::runtime_error("CTransaction::GetValueOut() : value out of range : wraps the int64_t boundary");
        nValueOut += it->nValue;
    }
    return nValueOut;
}

// Value of the FIRST zerocoin-mint output, or 0 if none exists.
CAmount CTransaction::GetZerocoinMinted() const
{
    for (const CTxOut& txOut : vout) {
        if(!txOut.scriptPubKey.IsZerocoinMint())
            continue;
        return txOut.nValue;
    }
    return  CAmount(0);
}

// True if any input spends the given outpoint.
bool CTransaction::UsesUTXO(const COutPoint out)
{
    for (const CTxIn& in : vin) {
        if (in.prevout == out)
            return true;
    }
    return false;
}

// Outpoints created by this transaction (txid paired with each vout index).
std::list<COutPoint> CTransaction::GetOutPoints() const
{
    std::list<COutPoint> listOutPoints;
    uint256 txHash = GetHash();
    for (unsigned int i = 0; i < vout.size(); i++)
        listOutPoints.emplace_back(COutPoint(txHash, i));
    return listOutPoints;
}

// Total value redeemed by zerocoin-spend inputs. Relies on the CTxIn zerocoin
// constructor storing the denomination in nSequence.
CAmount CTransaction::GetZerocoinSpent() const
{
    if(!IsZerocoinSpend())
        return 0;
    CAmount nValueOut = 0;
    for (const CTxIn& txin : vin) {
        if(!txin.scriptSig.IsZerocoinSpend())
            continue;
        nValueOut += txin.nSequence * COIN;
    }
    return nValueOut;
}

// Number of zerocoin-mint outputs.
int CTransaction::GetZerocoinMintCount() const
{
    int nCount = 0;
    for (const CTxOut& out : vout) {
        if (out.scriptPubKey.IsZerocoinMint())
            nCount++;
    }
    return nCount;
}

// Fee-priority heuristic: input-age-weighted value divided by the modified size.
double CTransaction::ComputePriority(double dPriorityInputs, unsigned int nTxSize) const
{
    nTxSize = CalculateModifiedSize(nTxSize);
    if (nTxSize == 0) return 0.0;
    return dPriorityInputs / nTxSize;
}

unsigned int CTransaction::CalculateModifiedSize(unsigned int nTxSize) const
{
    // In order to avoid disincentivizing cleaning up the UTXO set we don't count
    // the constant overhead for each txin and up to 110 bytes of scriptSig (which
    // is enough to cover a compressed pubkey p2sh redemption) for priority.
    // Providing any more cleanup incentive than making additional inputs free would
    // risk encouraging people to create junk outputs to redeem later.
    if (nTxSize == 0)
        nTxSize = ::GetSerializeSize(*this, SER_NETWORK, PROTOCOL_VERSION);
    for (std::vector<CTxIn>::const_iterator it(vin.begin()); it != vin.end(); ++it)
    {
        unsigned int offset = 41U + std::min(110U, (unsigned int)it->scriptSig.size());
        if (nTxSize > offset)
            nTxSize -= offset;
    }
    return nTxSize;
}

std::string CTransaction::ToString() const
{
    std::string str;
    str += strprintf("CTransaction(hash=%s, ver=%d, vin.size=%u, vout.size=%u, nLockTime=%u)\n",
        GetHash().ToString().substr(0,10),
        nVersion,
        vin.size(),
        vout.size(),
        nLockTime);
    for (unsigned int i = 0; i < vin.size(); i++)
        str += "    " + vin[i].ToString() + "\n";
    for (unsigned int i = 0; i < vout.size(); i++)
        str += "    " + vout[i].ToString() + "\n";
    return str;
}
//
// prioritised_handlers.cpp
// ~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

#include <boost/asio.hpp>
#include <iostream>
#include <memory>
#include <queue>

using boost::asio::ip::tcp;

// An execution context that, instead of running handlers immediately, stores
// them in a priority queue for later batch execution via execute_all().
class handler_priority_queue : public boost::asio::execution_context
{
public:
  // Type-erases `function` into a queued_handler and enqueues it.
  template <typename Function>
  void add(int priority, Function function)
  {
    std::unique_ptr<queued_handler_base> handler(
        new queued_handler<Function>(
          priority, std::move(function)));

    handlers_.push(std::move(handler));
  }

  // Drains the queue, invoking handlers highest-priority first.
  void execute_all()
  {
    while (!handlers_.empty())
    {
      handlers_.top()->execute();
      handlers_.pop();
    }
  }

  // Lightweight executor handle carrying a priority; all three submission
  // operations (dispatch/post/defer) simply enqueue into the owning queue.
  class executor
  {
  public:
    executor(handler_priority_queue& q, int p)
      : context_(q), priority_(p)
    {
    }

    handler_priority_queue& context() const noexcept
    {
      return context_;
    }

    template <typename Function, typename Allocator>
    void dispatch(Function f, const Allocator&) const
    {
      context_.add(priority_, std::move(f));
    }

    template <typename Function, typename Allocator>
    void post(Function f, const Allocator&) const
    {
      context_.add(priority_, std::move(f));
    }

    template <typename Function, typename Allocator>
    void defer(Function f, const Allocator&) const
    {
      context_.add(priority_, std::move(f));
    }

    void on_work_started() const noexcept {}
    void on_work_finished() const noexcept {}

    // Executors compare equal only for the same queue AND the same priority.
    bool operator==(const executor& other) const noexcept
    {
      return &context_ == &other.context_
        && priority_ == other.priority_;
    }

    bool operator!=(const executor& other) const noexcept
    {
      return !operator==(other);
    }

  private:
    handler_priority_queue& context_;
    int priority_;
  };

  // Binds `handler` to a priority-carrying executor so that any intermediate
  // completion handlers also route through this queue.
  template <typename Handler>
  boost::asio::executor_binder<Handler, executor>
  wrap(int priority, Handler handler)
  {
    return boost::asio::bind_executor(
        executor(*this, priority), std::move(handler));
  }

private:
  // Type-erasure base so heterogeneous handlers share one queue.
  class queued_handler_base
  {
  public:
    queued_handler_base(int p)
      : priority_(p)
    {
    }

    virtual ~queued_handler_base()
    {
    }

    virtual void execute() = 0;

    // Ordering for std::priority_queue: larger priority_ surfaces first.
    friend bool operator<(const std::unique_ptr<queued_handler_base>& a,
        const std::unique_ptr<queued_handler_base>& b) noexcept
    {
      return a->priority_ < b->priority_;
    }

  private:
    int priority_;
  };

  template <typename Function>
  class queued_handler : public queued_handler_base
  {
  public:
    queued_handler(int p, Function f)
      : queued_handler_base(p), function_(std::move(f))
    {
    }

    void execute() override
    {
      function_();
    }

  private:
    Function function_;
  };

  std::priority_queue<std::unique_ptr<queued_handler_base>> handlers_;
};

//----------------------------------------------------------------------

void high_priority_handler(const boost::system::error_code& /*ec*/,
    tcp::socket /*socket*/)
{
  std::cout << "High priority handler\n";
}

void middle_priority_handler(const boost::system::error_code& /*ec*/)
{
  std::cout << "Middle priority handler\n";
}

struct low_priority_handler
{
  // Make the handler a move-only type.
  low_priority_handler() = default;
  low_priority_handler(const low_priority_handler&) = delete;
  low_priority_handler(low_priority_handler&&) = default;

  void operator()()
  {
    std::cout << "Low priority handler\n";
  }
};

// Demonstrates the queue: three operations complete immediately but their
// handlers run in priority order (100, 42, 0) via execute_all().
int main()
{
  boost::asio::io_context io_context;

  handler_priority_queue pri_queue;

  // Post a completion handler to be run immediately.
  boost::asio::post(io_context, pri_queue.wrap(0, low_priority_handler()));

  // Start an asynchronous accept that will complete immediately.
  tcp::endpoint endpoint(boost::asio::ip::address_v4::loopback(), 0);
  tcp::acceptor acceptor(io_context, endpoint);
  tcp::socket server_socket(io_context);
  acceptor.async_accept(pri_queue.wrap(100, high_priority_handler));
  tcp::socket client_socket(io_context);
  client_socket.connect(acceptor.local_endpoint());

  // Set a deadline timer to expire immediately.
  boost::asio::steady_timer timer(io_context);
  timer.expires_at(boost::asio::steady_timer::clock_type::time_point::min());
  timer.async_wait(pri_queue.wrap(42, middle_priority_handler));

  while (io_context.run_one())
  {
    // The custom invocation hook adds the handlers to the priority queue
    // rather than executing them from within the poll_one() call.
    while (io_context.poll_one())
      ;

    pri_queue.execute_all();
  }

  return 0;
}
// Copyright 2018-2021 The Autoware Foundation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "trajectory_follower/mpc.hpp" #include <algorithm> #include <deque> #include <limits> #include <memory> #include <string> #include <utility> #include <vector> #define DEG2RAD 3.1415926535 / 180.0 #define RAD2DEG 180.0 / 3.1415926535 namespace autoware { namespace motion { namespace control { namespace trajectory_follower { using namespace std::literals::chrono_literals; using ::motion::motion_common::to_angle; bool8_t MPC::calculateMPC( const autoware_auto_vehicle_msgs::msg::SteeringReport & current_steer, const float64_t current_velocity, const geometry_msgs::msg::Pose & current_pose, autoware_auto_control_msgs::msg::AckermannLateralCommand & ctrl_cmd, autoware_auto_planning_msgs::msg::Trajectory & predicted_traj, autoware_auto_system_msgs::msg::Float32MultiArrayDiagnostic & diagnostic) { /* recalculate velocity from ego-velocity with dynamics */ trajectory_follower::MPCTrajectory reference_trajectory = applyVelocityDynamicsFilter(m_ref_traj, current_pose, current_velocity); MPCData mpc_data; if (!getData(reference_trajectory, current_steer, current_pose, &mpc_data)) { RCLCPP_WARN_THROTTLE(m_logger, *m_clock, 1000 /*ms*/, "fail to get Data."); return false; } /* define initial state for error dynamics */ Eigen::VectorXd x0 = getInitialState(mpc_data); /* delay compensation */ if (!updateStateForDelayCompensation(reference_trajectory, mpc_data.nearest_time, &x0)) { 
RCLCPP_WARN_SKIPFIRST_THROTTLE( m_logger, *m_clock, 1000 /*ms*/, "updateStateForDelayCompensation failed. stop computation."); return false; } /* resample ref_traj with mpc sampling time */ trajectory_follower::MPCTrajectory mpc_resampled_ref_traj; const float64_t mpc_start_time = mpc_data.nearest_time + m_param.input_delay; if (!resampleMPCTrajectoryByTime(mpc_start_time, reference_trajectory, &mpc_resampled_ref_traj)) { RCLCPP_WARN_THROTTLE(m_logger, *m_clock, 1000 /*ms*/, "trajectory resampling failed."); return false; } /* generate mpc matrix : predict equation Xec = Aex * x0 + Bex * Uex + Wex */ MPCMatrix mpc_matrix = generateMPCMatrix(mpc_resampled_ref_traj); /* solve quadratic optimization */ Eigen::VectorXd Uex; if (!executeOptimization(mpc_matrix, x0, &Uex)) { RCLCPP_WARN_THROTTLE(m_logger, *m_clock, 1000 /*ms*/, "optimization failed."); return false; } /* apply saturation and filter */ const float64_t u_saturated = std::max(std::min(Uex(0), m_steer_lim), -m_steer_lim); const float64_t u_filtered = m_lpf_steering_cmd.filter(u_saturated); /* set control command */ { const auto & dt = m_param.prediction_dt; ctrl_cmd.steering_tire_angle = static_cast<float>(u_filtered); ctrl_cmd.steering_tire_rotation_rate = static_cast<float>((Uex(1) - Uex(0)) / dt); } storeSteerCmd(u_filtered); /* save input to buffer for delay compensation*/ m_input_buffer.push_back(ctrl_cmd.steering_tire_angle); m_input_buffer.pop_front(); m_raw_steer_cmd_pprev = m_raw_steer_cmd_prev; m_raw_steer_cmd_prev = Uex(0); /* calculate predicted trajectory */ Eigen::VectorXd Xex = mpc_matrix.Aex * x0 + mpc_matrix.Bex * Uex + mpc_matrix.Wex; trajectory_follower::MPCTrajectory mpc_predicted_traj; const auto & traj = mpc_resampled_ref_traj; for (size_t i = 0; i < static_cast<size_t>(m_param.prediction_horizon); ++i) { const int64_t DIM_X = m_vehicle_model_ptr->getDimX(); const float64_t lat_error = Xex(static_cast<int64_t>(i) * DIM_X); const float64_t yaw_error = Xex(static_cast<int64_t>(i) * DIM_X 
+ 1); const float64_t x = traj.x[i] - std::sin(traj.yaw[i]) * lat_error; const float64_t y = traj.y[i] + std::cos(traj.yaw[i]) * lat_error; const float64_t z = traj.z[i]; const float64_t yaw = traj.yaw[i] + yaw_error; const float64_t vx = traj.vx[i]; const float64_t k = traj.k[i]; const float64_t smooth_k = traj.smooth_k[i]; const float64_t relative_time = traj.relative_time[i]; mpc_predicted_traj.push_back(x, y, z, yaw, vx, k, smooth_k, relative_time); } trajectory_follower::MPCUtils::convertToAutowareTrajectory(mpc_predicted_traj, predicted_traj); /* prepare diagnostic message */ const float64_t nearest_k = reference_trajectory.k[static_cast<size_t>(mpc_data.nearest_idx)]; const float64_t nearest_smooth_k = reference_trajectory.smooth_k[static_cast<size_t>(mpc_data.nearest_idx)]; const float64_t steer_cmd = ctrl_cmd.steering_tire_angle; const float64_t wb = m_vehicle_model_ptr->getWheelbase(); typedef decltype(diagnostic.diag_array.data)::value_type DiagnosticValueType; auto append_diag_data = [&](const auto & val) -> void { diagnostic.diag_array.data.push_back(static_cast<DiagnosticValueType>(val)); }; // [0] final steering command (MPC + LPF) append_diag_data(steer_cmd); // [1] mpc calculation result append_diag_data(Uex(0)); // [2] feedforward steering value append_diag_data(mpc_matrix.Uref_ex(0)); // [3] feedforward steering value raw append_diag_data(std::atan(nearest_smooth_k * wb)); // [4] current steering angle append_diag_data(mpc_data.steer); // [5] lateral error append_diag_data(mpc_data.lateral_err); // [6] current_pose yaw append_diag_data(to_angle(current_pose.orientation)); // [7] nearest_pose yaw append_diag_data(to_angle(mpc_data.nearest_pose.orientation)); // [8] yaw error append_diag_data(mpc_data.yaw_err); // [9] reference velocity append_diag_data(reference_trajectory.vx[static_cast<size_t>(mpc_data.nearest_idx)]); // [10] measured velocity append_diag_data(current_velocity); // [11] angvel from steer command append_diag_data(current_velocity 
* tan(steer_cmd) / wb); // [12] angvel from measured steer append_diag_data(current_velocity * tan(mpc_data.steer) / wb); // [13] angvel from path curvature append_diag_data(current_velocity * nearest_smooth_k); // [14] nearest path curvature (used for feedforward) append_diag_data(nearest_smooth_k); // [15] nearest path curvature (not smoothed) append_diag_data(nearest_k); // [16] predicted steer append_diag_data(mpc_data.predicted_steer); // [17] angvel from predicted steer append_diag_data(current_velocity * tan(mpc_data.predicted_steer) / wb); return true; } void MPC::setReferenceTrajectory( const autoware_auto_planning_msgs::msg::Trajectory & trajectory_msg, const float64_t traj_resample_dist, const bool8_t enable_path_smoothing, const int64_t path_filter_moving_ave_num, const int64_t curvature_smoothing_num_traj, const int64_t curvature_smoothing_num_ref_steer, const geometry_msgs::msg::PoseStamped::SharedPtr current_pose_ptr) { trajectory_follower::MPCTrajectory mpc_traj_raw; // received raw trajectory trajectory_follower::MPCTrajectory mpc_traj_resampled; // resampled trajectory trajectory_follower::MPCTrajectory mpc_traj_smoothed; // smooth filtered trajectory /* resampling */ trajectory_follower::MPCUtils::convertToMPCTrajectory(trajectory_msg, mpc_traj_raw); if (!trajectory_follower::MPCUtils::resampleMPCTrajectoryByDistance( mpc_traj_raw, traj_resample_dist, &mpc_traj_resampled)) { RCLCPP_WARN(m_logger, "[setReferenceTrajectory] spline error when resampling by distance"); return; } /* path smoothing */ mpc_traj_smoothed = mpc_traj_resampled; const int64_t mpc_traj_resampled_size = static_cast<int64_t>(mpc_traj_resampled.size()); if (enable_path_smoothing && mpc_traj_resampled_size > 2 * path_filter_moving_ave_num) { if ( !trajectory_follower::MoveAverageFilter::filt_vector( path_filter_moving_ave_num, mpc_traj_smoothed.x) || !trajectory_follower::MoveAverageFilter::filt_vector( path_filter_moving_ave_num, mpc_traj_smoothed.y) || 
!trajectory_follower::MoveAverageFilter::filt_vector( path_filter_moving_ave_num, mpc_traj_smoothed.yaw) || !trajectory_follower::MoveAverageFilter::filt_vector( path_filter_moving_ave_num, mpc_traj_smoothed.vx)) { RCLCPP_DEBUG(m_logger, "path callback: filtering error. stop filtering."); mpc_traj_smoothed = mpc_traj_resampled; } } /* calculate yaw angle */ if (current_pose_ptr) { const int64_t nearest_idx = MPCUtils::calcNearestIndex(mpc_traj_smoothed, current_pose_ptr->pose); const float64_t ego_yaw = tf2::getYaw(current_pose_ptr->pose.orientation); trajectory_follower::MPCUtils::calcTrajectoryYawFromXY( &mpc_traj_smoothed, nearest_idx, ego_yaw); trajectory_follower::MPCUtils::convertEulerAngleToMonotonic(&mpc_traj_smoothed.yaw); } /* calculate curvature */ trajectory_follower::MPCUtils::calcTrajectoryCurvature( static_cast<size_t>(curvature_smoothing_num_traj), static_cast<size_t>(curvature_smoothing_num_ref_steer), &mpc_traj_smoothed); /* add end point with vel=0 on traj for mpc prediction */ { auto & t = mpc_traj_smoothed; const float64_t t_ext = 100.0; // extra time to prevent mpc calcul failure due to short time const float64_t t_end = t.relative_time.back() + getPredictionTime() + t_ext; const float64_t v_end = 0.0; t.vx.back() = v_end; // set for end point t.push_back( t.x.back(), t.y.back(), t.z.back(), t.yaw.back(), v_end, t.k.back(), t.smooth_k.back(), t_end); } if (!mpc_traj_smoothed.size()) { RCLCPP_DEBUG(m_logger, "path callback: trajectory size is undesired."); return; } m_ref_traj = mpc_traj_smoothed; } void MPC::resetPrevResult(const autoware_auto_vehicle_msgs::msg::SteeringReport & current_steer) { m_raw_steer_cmd_prev = current_steer.steering_tire_angle; m_raw_steer_cmd_pprev = current_steer.steering_tire_angle; } bool8_t MPC::getData( const trajectory_follower::MPCTrajectory & traj, const autoware_auto_vehicle_msgs::msg::SteeringReport & current_steer, const geometry_msgs::msg::Pose & current_pose, MPCData * data) { static constexpr auto 
duration = 5000 /*ms*/; size_t nearest_idx; if (!trajectory_follower::MPCUtils::calcNearestPoseInterp( traj, current_pose, &(data->nearest_pose), &(nearest_idx), &(data->nearest_time), m_logger, *m_clock)) { // reset previous MPC result // Note: When a large deviation from the trajectory occurs, the optimization stops and // the vehicle will return to the path by re-planning the trajectory or external operation. // After the recovery, the previous value of the optimization may deviate greatly from // the actual steer angle, and it may make the optimization result unstable. resetPrevResult(current_steer); RCLCPP_WARN_SKIPFIRST_THROTTLE( m_logger, *m_clock, duration, "calculateMPC: error in calculating nearest pose. stop mpc."); return false; } /* get data */ data->nearest_idx = static_cast<int64_t>(nearest_idx); data->steer = static_cast<float64_t>(current_steer.steering_tire_angle); data->lateral_err = trajectory_follower::MPCUtils::calcLateralError(current_pose, data->nearest_pose); data->yaw_err = autoware::common::helper_functions::wrap_angle( to_angle(current_pose.orientation) - to_angle(data->nearest_pose.orientation)); /* get predicted steer */ if (!m_steer_prediction_prev) { m_steer_prediction_prev = std::make_shared<float64_t>(current_steer.steering_tire_angle); } data->predicted_steer = calcSteerPrediction(); *m_steer_prediction_prev = data->predicted_steer; /* check error limit */ const float64_t dist_err = autoware::common::geometry::distance_2d<float64_t>( current_pose.position, data->nearest_pose.position); if (dist_err > m_admissible_position_error) { RCLCPP_WARN_SKIPFIRST_THROTTLE( m_logger, *m_clock, duration, "position error is over limit. error = %fm, limit: %fm", dist_err, m_admissible_position_error); return false; } /* check yaw error limit */ if (std::fabs(data->yaw_err) > m_admissible_yaw_error_rad) { RCLCPP_WARN_SKIPFIRST_THROTTLE( m_logger, *m_clock, duration, "yaw error is over limit. 
error = %f deg, limit %f deg", RAD2DEG * data->yaw_err, RAD2DEG * m_admissible_yaw_error_rad); return false; } /* check trajectory time length */ auto end_time = data->nearest_time + m_param.input_delay + getPredictionTime(); if (end_time > traj.relative_time.back()) { RCLCPP_WARN_SKIPFIRST_THROTTLE( m_logger, *m_clock, 1000 /*ms*/, "path is too short for prediction."); return false; } return true; } float64_t MPC::calcSteerPrediction() { auto t_start = m_time_prev; auto t_end = m_clock->now(); m_time_prev = t_end; const float64_t duration = (t_end - t_start).seconds(); const float64_t time_constant = m_param.steer_tau; const float64_t initial_response = std::exp(-duration / time_constant) * (*m_steer_prediction_prev); if (m_ctrl_cmd_vec.size() <= 2) { return initial_response; } return initial_response + getSteerCmdSum(t_start, t_end, time_constant); } float64_t MPC::getSteerCmdSum( const rclcpp::Time & t_start, const rclcpp::Time & t_end, const float64_t time_constant) const { if (m_ctrl_cmd_vec.size() <= 2) { return 0.0; } // Find first index of control command container size_t idx = 1; while (t_start > rclcpp::Time(m_ctrl_cmd_vec.at(idx).stamp)) { if ((idx + 1) >= m_ctrl_cmd_vec.size()) { return 0.0; } ++idx; } // Compute steer command input response float64_t steer_sum = 0.0; auto t = t_start; while (t_end > rclcpp::Time(m_ctrl_cmd_vec.at(idx).stamp)) { const float64_t duration = (rclcpp::Time(m_ctrl_cmd_vec.at(idx).stamp) - t).seconds(); t = rclcpp::Time(m_ctrl_cmd_vec.at(idx).stamp); steer_sum += (1 - std::exp(-duration / time_constant)) * static_cast<float64_t>(m_ctrl_cmd_vec.at(idx - 1).steering_tire_angle); ++idx; if (idx >= m_ctrl_cmd_vec.size()) { break; } } const float64_t duration = (t_end - t).seconds(); steer_sum += (1 - std::exp(-duration / time_constant)) * static_cast<float64_t>(m_ctrl_cmd_vec.at(idx - 1).steering_tire_angle); return steer_sum; } void MPC::storeSteerCmd(const float64_t steer) { const auto time_delayed = m_clock->now() + 
rclcpp::Duration::from_seconds(m_param.input_delay); autoware_auto_control_msgs::msg::AckermannLateralCommand cmd; cmd.stamp = time_delayed; cmd.steering_tire_angle = static_cast<float>(steer); // store published ctrl cmd m_ctrl_cmd_vec.emplace_back(cmd); if (m_ctrl_cmd_vec.size() <= 2) { return; } // remove unused ctrl cmd constexpr float64_t store_time = 0.3; if ((time_delayed - m_ctrl_cmd_vec.at(1).stamp).seconds() > m_param.input_delay + store_time) { m_ctrl_cmd_vec.erase(m_ctrl_cmd_vec.begin()); } } bool8_t MPC::resampleMPCTrajectoryByTime( float64_t ts, const trajectory_follower::MPCTrajectory & input, trajectory_follower::MPCTrajectory * output) const { std::vector<float64_t> mpc_time_v; for (float64_t i = 0; i < static_cast<float64_t>(m_param.prediction_horizon); ++i) { mpc_time_v.push_back(ts + i * m_param.prediction_dt); } if (!trajectory_follower::MPCUtils::linearInterpMPCTrajectory( input.relative_time, input, mpc_time_v, output)) { RCLCPP_WARN_SKIPFIRST_THROTTLE( m_logger, *m_clock, 1000 /*ms*/, "calculateMPC: mpc resample error. stop mpc calculation. check code!"); return false; } return true; } Eigen::VectorXd MPC::getInitialState(const MPCData & data) { const int64_t DIM_X = m_vehicle_model_ptr->getDimX(); Eigen::VectorXd x0 = Eigen::VectorXd::Zero(DIM_X); const auto & lat_err = data.lateral_err; const auto & steer = m_use_steer_prediction ? 
data.predicted_steer : data.steer; const auto & yaw_err = data.yaw_err; if (m_vehicle_model_type == "kinematics") { x0 << lat_err, yaw_err, steer; } else if (m_vehicle_model_type == "kinematics_no_delay") { x0 << lat_err, yaw_err; } else if (m_vehicle_model_type == "dynamics") { float64_t dlat = (lat_err - m_lateral_error_prev) / m_ctrl_period; float64_t dyaw = (yaw_err - m_yaw_error_prev) / m_ctrl_period; m_lateral_error_prev = lat_err; m_yaw_error_prev = yaw_err; dlat = m_lpf_lateral_error.filter(dlat); dyaw = m_lpf_yaw_error.filter(dyaw); x0 << lat_err, dlat, yaw_err, dyaw; RCLCPP_DEBUG(m_logger, "(before lpf) dot_lat_err = %f, dot_yaw_err = %f", dlat, dyaw); RCLCPP_DEBUG(m_logger, "(after lpf) dot_lat_err = %f, dot_yaw_err = %f", dlat, dyaw); } else { RCLCPP_ERROR(m_logger, "vehicle_model_type is undefined"); } return x0; } bool8_t MPC::updateStateForDelayCompensation( const trajectory_follower::MPCTrajectory & traj, const float64_t & start_time, Eigen::VectorXd * x) { const int64_t DIM_X = m_vehicle_model_ptr->getDimX(); const int64_t DIM_U = m_vehicle_model_ptr->getDimU(); const int64_t DIM_Y = m_vehicle_model_ptr->getDimY(); Eigen::MatrixXd Ad(DIM_X, DIM_X); Eigen::MatrixXd Bd(DIM_X, DIM_U); Eigen::MatrixXd Wd(DIM_X, 1); Eigen::MatrixXd Cd(DIM_Y, DIM_X); Eigen::MatrixXd x_curr = *x; float64_t mpc_curr_time = start_time; for (uint64_t i = 0; i < m_input_buffer.size(); ++i) { float64_t k = 0.0; float64_t v = 0.0; if ( !trajectory_follower::linearInterpolate(traj.relative_time, traj.k, mpc_curr_time, k) || !trajectory_follower::linearInterpolate(traj.relative_time, traj.vx, mpc_curr_time, v)) { RCLCPP_ERROR( m_logger, "mpc resample error at delay compensation, stop mpc calculation. 
check code!"); return false; } /* get discrete state matrix A, B, C, W */ m_vehicle_model_ptr->setVelocity(v); m_vehicle_model_ptr->setCurvature(k); m_vehicle_model_ptr->calculateDiscreteMatrix(Ad, Bd, Cd, Wd, m_ctrl_period); Eigen::MatrixXd ud = Eigen::MatrixXd::Zero(DIM_U, 1); ud(0, 0) = m_input_buffer.at(i); // for steering input delay x_curr = Ad * x_curr + Bd * ud + Wd; mpc_curr_time += m_ctrl_period; } *x = x_curr; return true; } trajectory_follower::MPCTrajectory MPC::applyVelocityDynamicsFilter( const trajectory_follower::MPCTrajectory & input, const geometry_msgs::msg::Pose & current_pose, const float64_t v0) const { int64_t nearest_idx = trajectory_follower::MPCUtils::calcNearestIndex(input, current_pose); if (nearest_idx < 0) { return input; } const float64_t acc_lim = m_param.acceleration_limit; const float64_t tau = m_param.velocity_time_constant; trajectory_follower::MPCTrajectory output = input; trajectory_follower::MPCUtils::dynamicSmoothingVelocity( static_cast<size_t>(nearest_idx), v0, acc_lim, tau, output); const float64_t t_ext = 100.0; // extra time to prevent mpc calculation failure due to short time const float64_t t_end = output.relative_time.back() + getPredictionTime() + t_ext; const float64_t v_end = 0.0; output.vx.back() = v_end; // set for end point output.push_back( output.x.back(), output.y.back(), output.z.back(), output.yaw.back(), v_end, output.k.back(), output.smooth_k.back(), t_end); return output; } /* * predict equation: Xec = Aex * x0 + Bex * Uex + Wex * cost function: J = Xex' * Qex * Xex + (Uex - Uref)' * R1ex * (Uex - Uref_ex) + Uex' * R2ex * Uex * Qex = diag([Q,Q,...]), R1ex = diag([R,R,...]) */ MPCMatrix MPC::generateMPCMatrix(const trajectory_follower::MPCTrajectory & reference_trajectory) { using Eigen::MatrixXd; const int64_t N = m_param.prediction_horizon; const float64_t DT = m_param.prediction_dt; const int64_t DIM_X = m_vehicle_model_ptr->getDimX(); const int64_t DIM_U = m_vehicle_model_ptr->getDimU(); const 
int64_t DIM_Y = m_vehicle_model_ptr->getDimY(); MPCMatrix m; m.Aex = MatrixXd::Zero(DIM_X * N, DIM_X); m.Bex = MatrixXd::Zero(DIM_X * N, DIM_U * N); m.Wex = MatrixXd::Zero(DIM_X * N, 1); m.Cex = MatrixXd::Zero(DIM_Y * N, DIM_X * N); m.Qex = MatrixXd::Zero(DIM_Y * N, DIM_Y * N); m.R1ex = MatrixXd::Zero(DIM_U * N, DIM_U * N); m.R2ex = MatrixXd::Zero(DIM_U * N, DIM_U * N); m.Uref_ex = MatrixXd::Zero(DIM_U * N, 1); /* weight matrix depends on the vehicle model */ MatrixXd Q = MatrixXd::Zero(DIM_Y, DIM_Y); MatrixXd R = MatrixXd::Zero(DIM_U, DIM_U); MatrixXd Q_adaptive = MatrixXd::Zero(DIM_Y, DIM_Y); MatrixXd R_adaptive = MatrixXd::Zero(DIM_U, DIM_U); MatrixXd Ad(DIM_X, DIM_X); MatrixXd Bd(DIM_X, DIM_U); MatrixXd Wd(DIM_X, 1); MatrixXd Cd(DIM_Y, DIM_X); MatrixXd Uref(DIM_U, 1); constexpr float64_t ep = 1.0e-3; // large enough to ignore velocity noise /* predict dynamics for N times */ for (int64_t i = 0; i < N; ++i) { const float64_t ref_vx = reference_trajectory.vx[static_cast<size_t>(i)]; const float64_t ref_vx_squared = ref_vx * ref_vx; // curvature will be 0 when vehicle stops const float64_t ref_k = reference_trajectory.k[static_cast<size_t>(i)] * m_sign_vx; const float64_t ref_smooth_k = reference_trajectory.smooth_k[static_cast<size_t>(i)] * m_sign_vx; /* get discrete state matrix A, B, C, W */ m_vehicle_model_ptr->setVelocity(ref_vx); m_vehicle_model_ptr->setCurvature(ref_k); m_vehicle_model_ptr->calculateDiscreteMatrix(Ad, Bd, Cd, Wd, DT); Q = Eigen::MatrixXd::Zero(DIM_Y, DIM_Y); R = Eigen::MatrixXd::Zero(DIM_U, DIM_U); Q(0, 0) = getWeightLatError(ref_k); Q(1, 1) = getWeightHeadingError(ref_k); R(0, 0) = getWeightSteerInput(ref_k); Q_adaptive = Q; R_adaptive = R; if (i == N - 1) { Q_adaptive(0, 0) = m_param.weight_terminal_lat_error; Q_adaptive(1, 1) = m_param.weight_terminal_heading_error; } Q_adaptive(1, 1) += ref_vx_squared * getWeightHeadingErrorSqVel(ref_k); R_adaptive(0, 0) += ref_vx_squared * getWeightSteerInputSqVel(ref_k); /* update mpc matrix */ 
int64_t idx_x_i = i * DIM_X; int64_t idx_x_i_prev = (i - 1) * DIM_X; int64_t idx_u_i = i * DIM_U; int64_t idx_y_i = i * DIM_Y; if (i == 0) { m.Aex.block(0, 0, DIM_X, DIM_X) = Ad; m.Bex.block(0, 0, DIM_X, DIM_U) = Bd; m.Wex.block(0, 0, DIM_X, 1) = Wd; } else { m.Aex.block(idx_x_i, 0, DIM_X, DIM_X) = Ad * m.Aex.block(idx_x_i_prev, 0, DIM_X, DIM_X); for (int64_t j = 0; j < i; ++j) { int64_t idx_u_j = j * DIM_U; m.Bex.block(idx_x_i, idx_u_j, DIM_X, DIM_U) = Ad * m.Bex.block(idx_x_i_prev, idx_u_j, DIM_X, DIM_U); } m.Wex.block(idx_x_i, 0, DIM_X, 1) = Ad * m.Wex.block(idx_x_i_prev, 0, DIM_X, 1) + Wd; } m.Bex.block(idx_x_i, idx_u_i, DIM_X, DIM_U) = Bd; m.Cex.block(idx_y_i, idx_x_i, DIM_Y, DIM_X) = Cd; m.Qex.block(idx_y_i, idx_y_i, DIM_Y, DIM_Y) = Q_adaptive; m.R1ex.block(idx_u_i, idx_u_i, DIM_U, DIM_U) = R_adaptive; /* get reference input (feed-forward) */ m_vehicle_model_ptr->setCurvature(ref_smooth_k); m_vehicle_model_ptr->calculateReferenceInput(Uref); if (std::fabs(Uref(0, 0)) < DEG2RAD * m_param.zero_ff_steer_deg) { Uref(0, 0) = 0.0; // ignore curvature noise } m.Uref_ex.block(i * DIM_U, 0, DIM_U, 1) = Uref; } /* add lateral jerk : weight for (v * {u(i) - u(i-1)} )^2 */ for (int64_t i = 0; i < N - 1; ++i) { const float64_t ref_vx = reference_trajectory.vx[static_cast<size_t>(i)]; m_sign_vx = ref_vx > ep ? 1 : (ref_vx < -ep ? -1 : m_sign_vx); const float64_t ref_k = reference_trajectory.k[static_cast<size_t>(i)] * m_sign_vx; const float64_t j = ref_vx * ref_vx * getWeightLatJerk(ref_k) / (DT * DT); const Eigen::Matrix2d J = (Eigen::Matrix2d() << j, -j, -j, j).finished(); m.R2ex.block(i, i, 2, 2) += J; } addSteerWeightR(&m.R1ex); return m; } /* * solve quadratic optimization. 
* cost function: J = Xex' * Qex * Xex + (Uex - Uref)' * R1ex * (Uex - Uref_ex) + Uex' * R2ex * Uex * , Qex = diag([Q,Q,...]), R1ex = diag([R,R,...]) * constraint matrix : lb < U < ub, lbA < A*U < ubA * current considered constraint * - steering limit * - steering rate limit * * (1)lb < u < ub && (2)lbA < Au < ubA --> (3)[lb, lbA] < [I, A]u < [ub, ubA] * (1)lb < u < ub ... * [-u_lim] < [ u0 ] < [u_lim] * [-u_lim] < [ u1 ] < [u_lim] * ~~~ * [-u_lim] < [ uN ] < [u_lim] (*N... DIM_U) * (2)lbA < Au < ubA ... * [prev_u0 - au_lim*ctp] < [ u0 ] < [prev_u0 + au_lim*ctp] (*ctp ... ctrl_period) * [ -au_lim * dt ] < [u1 - u0] < [ au_lim * dt ] * [ -au_lim * dt ] < [u2 - u1] < [ au_lim * dt ] * ~~~ * [ -au_lim * dt ] < [uN-uN-1] < [ au_lim * dt ] (*N... DIM_U) */ bool8_t MPC::executeOptimization( const MPCMatrix & m, const Eigen::VectorXd & x0, Eigen::VectorXd * Uex) { using Eigen::MatrixXd; using Eigen::VectorXd; if (!isValid(m)) { RCLCPP_WARN_SKIPFIRST_THROTTLE( m_logger, *m_clock, 1000 /*ms*/, "model matrix is invalid. stop MPC."); return false; } const int64_t DIM_U_N = m_param.prediction_horizon * m_vehicle_model_ptr->getDimU(); // cost function: 1/2 * Uex' * H * Uex + f' * Uex, H = B' * C' * Q * C * B + R const MatrixXd CB = m.Cex * m.Bex; const MatrixXd QCB = m.Qex * CB; // MatrixXd H = CB.transpose() * QCB + m.R1ex + m.R2ex; // This calculation is heavy. looking for // a good way. 
//NOLINT MatrixXd H = MatrixXd::Zero(DIM_U_N, DIM_U_N); H.triangularView<Eigen::Upper>() = CB.transpose() * QCB; H.triangularView<Eigen::Upper>() += m.R1ex + m.R2ex; H.triangularView<Eigen::Lower>() = H.transpose(); MatrixXd f = (m.Cex * (m.Aex * x0 + m.Wex)).transpose() * QCB - m.Uref_ex.transpose() * m.R1ex; addSteerWeightF(&f); MatrixXd A = MatrixXd::Identity(DIM_U_N, DIM_U_N); for (int64_t i = 1; i < DIM_U_N; i++) { A(i, i - 1) = -1.0; } VectorXd lb = VectorXd::Constant(DIM_U_N, -m_steer_lim); // min steering angle VectorXd ub = VectorXd::Constant(DIM_U_N, m_steer_lim); // max steering angle VectorXd lbA = VectorXd::Constant(DIM_U_N, -m_steer_rate_lim * m_param.prediction_dt); VectorXd ubA = VectorXd::Constant(DIM_U_N, m_steer_rate_lim * m_param.prediction_dt); lbA(0, 0) = m_raw_steer_cmd_prev - m_steer_rate_lim * m_ctrl_period; ubA(0, 0) = m_raw_steer_cmd_prev + m_steer_rate_lim * m_ctrl_period; auto t_start = std::chrono::system_clock::now(); bool8_t solve_result = m_qpsolver_ptr->solve(H, f.transpose(), A, lb, ub, lbA, ubA, *Uex); auto t_end = std::chrono::system_clock::now(); if (!solve_result) { RCLCPP_WARN_SKIPFIRST_THROTTLE(m_logger, *m_clock, 1000 /*ms*/, "qp solver error"); return false; } { auto t = std::chrono::duration_cast<std::chrono::milliseconds>(t_end - t_start).count(); RCLCPP_DEBUG(m_logger, "qp solver calculation time = %ld [ms]", t); } if (Uex->array().isNaN().any()) { RCLCPP_WARN_SKIPFIRST_THROTTLE( m_logger, *m_clock, 1000 /*ms*/, "model Uex includes NaN, stop MPC."); return false; } return true; } void MPC::addSteerWeightR(Eigen::MatrixXd * R_ptr) const { const int64_t N = m_param.prediction_horizon; const float64_t DT = m_param.prediction_dt; auto & R = *R_ptr; /* add steering rate : weight for (u(i) - u(i-1) / dt )^2 */ { const float64_t steer_rate_r = m_param.weight_steer_rate / (DT * DT); const Eigen::Matrix2d D = steer_rate_r * (Eigen::Matrix2d() << 1.0, -1.0, -1.0, 1.0).finished(); for (int64_t i = 0; i < N - 1; ++i) { R.block(i, 
i, 2, 2) += D; } if (N > 1) { // steer rate i = 0 R(0, 0) += m_param.weight_steer_rate / (m_ctrl_period * m_ctrl_period); } } /* add steering acceleration : weight for { (u(i+1) - 2*u(i) + u(i-1)) / dt^2 }^2 */ { const float64_t w = m_param.weight_steer_acc; const float64_t steer_acc_r = w / std::pow(DT, 4); const float64_t steer_acc_r_cp1 = w / (std::pow(DT, 3) * m_ctrl_period); const float64_t steer_acc_r_cp2 = w / (std::pow(DT, 2) * std::pow(m_ctrl_period, 2)); const float64_t steer_acc_r_cp4 = w / std::pow(m_ctrl_period, 4); const Eigen::Matrix3d D = steer_acc_r * (Eigen::Matrix3d() << 1.0, -2.0, 1.0, -2.0, 4.0, -2.0, 1.0, -2.0, 1.0).finished(); for (int64_t i = 1; i < N - 1; ++i) { R.block(i - 1, i - 1, 3, 3) += D; } if (N > 1) { // steer acc i = 1 R(0, 0) += steer_acc_r * 1.0 + steer_acc_r_cp2 * 1.0 + steer_acc_r_cp1 * 2.0; R(1, 0) += steer_acc_r * -1.0 + steer_acc_r_cp1 * -1.0; R(0, 1) += steer_acc_r * -1.0 + steer_acc_r_cp1 * -1.0; R(1, 1) += steer_acc_r * 1.0; // steer acc i = 0 R(0, 0) += steer_acc_r_cp4 * 1.0; } } } void MPC::addSteerWeightF(Eigen::MatrixXd * f_ptr) const { if (f_ptr->rows() < 2) { return; } const float64_t DT = m_param.prediction_dt; auto & f = *f_ptr; // steer rate for i = 0 f(0, 0) += -2.0 * m_param.weight_steer_rate / (std::pow(DT, 2)) * 0.5; // const float64_t steer_acc_r = m_param.weight_steer_acc / std::pow(DT, 4); const float64_t steer_acc_r_cp1 = m_param.weight_steer_acc / (std::pow(DT, 3) * m_ctrl_period); const float64_t steer_acc_r_cp2 = m_param.weight_steer_acc / (std::pow(DT, 2) * std::pow(m_ctrl_period, 2)); const float64_t steer_acc_r_cp4 = m_param.weight_steer_acc / std::pow(m_ctrl_period, 4); // steer acc i = 0 f(0, 0) += ((-2.0 * m_raw_steer_cmd_prev + m_raw_steer_cmd_pprev) * steer_acc_r_cp4) * 0.5; // steer acc for i = 1 f(0, 0) += (-2.0 * m_raw_steer_cmd_prev * (steer_acc_r_cp1 + steer_acc_r_cp2)) * 0.5; f(0, 1) += (2.0 * m_raw_steer_cmd_prev * steer_acc_r_cp1) * 0.5; } float64_t MPC::getPredictionTime() const { 
return static_cast<float64_t>(m_param.prediction_horizon - 1) * m_param.prediction_dt + m_param.input_delay + m_ctrl_period; } bool8_t MPC::isValid(const MPCMatrix & m) const { if ( m.Aex.array().isNaN().any() || m.Bex.array().isNaN().any() || m.Cex.array().isNaN().any() || m.Wex.array().isNaN().any() || m.Qex.array().isNaN().any() || m.R1ex.array().isNaN().any() || m.R2ex.array().isNaN().any() || m.Uref_ex.array().isNaN().any()) { return false; } if ( m.Aex.array().isInf().any() || m.Bex.array().isInf().any() || m.Cex.array().isInf().any() || m.Wex.array().isInf().any() || m.Qex.array().isInf().any() || m.R1ex.array().isInf().any() || m.R2ex.array().isInf().any() || m.Uref_ex.array().isInf().any()) { return false; } return true; } } // namespace trajectory_follower } // namespace control } // namespace motion } // namespace autoware
#include <Switch/Switch>

using namespace System;

namespace Examples {
  // The following class represents simple functionality of the trapezoid.
  class MathTrapezoidSample : public object {
  public:
    // The main entry point for the application: builds one trapezoid and
    // prints its height and base angles in radians and degrees.
    static void Main() {
      MathTrapezoidSample trpz(20.0, 10.0, 8.0, 6.0);
      Console::WriteLine("The trapezoid's bases are 20.0 and 10.0, the trapezoid's legs are 8.0 and 6.0");

      double h = trpz.GetHeight();
      Console::WriteLine("Trapezoid height is: {0}", h);

      double dxR = trpz.GetLeftBaseRadianAngle();
      Console::WriteLine("Trapezoid left base angle is: {0} Radians", dxR);

      double dyR = trpz.GetRightBaseRadianAngle();
      Console::WriteLine("Trapezoid right base angle is: {0} Radians", dyR);

      double dxD = trpz.GetLeftBaseDegreeAngle();
      Console::WriteLine("Trapezoid left base angle is: {0} Degrees", dxD);

      double dyD = trpz.GetRightBaseDegreeAngle();
      // Fixed copy-paste error: this value is the RIGHT base angle but was
      // previously printed with a "left base angle" label.
      Console::WriteLine("Trapezoid right base angle is: {0} Degrees", dyD);
    }

    // Stores the absolute values of the four sides (bases and legs).
    MathTrapezoidSample(double longbase, double shortbase, double leftLeg, double rightLeg) {
      this->longBase = Math::Abs(longbase);
      this->shortBase = Math::Abs(shortbase);
      this->leftLeg = Math::Abs(leftLeg);
      this->rightLeg = Math::Abs(rightLeg);
    }

    // Height from the right leg and the right partial base (Pythagoras).
    double GetHeight() {
      double x = GetRightSmallBase();
      return Math::Sqrt(Math::Pow(this->rightLeg, 2.0) - Math::Pow(x, 2.0));
    }

    // Area computed as height * long base / 2 (as written in the original sample).
    double GetSquare() {
      return GetHeight() * this->longBase / 2.0;
    }

    // Left base angle in radians via asin(height / left leg), rounded to 2 places.
    double GetLeftBaseRadianAngle() {
      double sinX = GetHeight() / this->leftLeg;
      return Math::Round(Math::Asin(sinX), 2);
    }

    // Right base angle in radians via the law of cosines, rounded to 2 places.
    double GetRightBaseRadianAngle() {
      double x = GetRightSmallBase();
      double cosX = (Math::Pow(this->rightLeg, 2.0) + Math::Pow(x, 2.0) - Math::Pow(GetHeight(), 2.0)) / (2 * x * this->rightLeg);
      return Math::Round(Math::Acos(cosX), 2);
    }

    // Left base angle converted to degrees, rounded to 2 places.
    double GetLeftBaseDegreeAngle() {
      double x = GetLeftBaseRadianAngle() * 180 / Math::PI;
      return Math::Round(x, 2);
    }

    // Right base angle converted to degrees, rounded to 2 places.
    double GetRightBaseDegreeAngle() {
      double x = GetRightBaseRadianAngle() * 180 / Math::PI;
      return Math::Round(x, 2);
    }

  private:
    double longBase;
    double shortBase;
    double leftLeg;
    double rightLeg;

    // Horizontal projection of the right leg onto the long base.
    double GetRightSmallBase() {
      return (Math::Pow(this->rightLeg, 2.0) - Math::Pow(this->leftLeg, 2.0) + Math::Pow(this->longBase, 2.0) + Math::Pow(this->shortBase, 2.0) - 2 * this->shortBase * this->longBase) / (2 * (this->longBase - this->shortBase));
    }
  };
}

startup_(Examples::MathTrapezoidSample);

// This code produces the following output:
//
// The trapezoid's bases are 20.0 and 10.0, the trapezoid's legs are 8.0 and 6.0
// Trapezoid height is: 4.8
// Trapezoid left base angle is: 0.64 Radians
// Trapezoid right base angle is: 0.93 Radians
// Trapezoid left base angle is: 36.67 Degrees
// Trapezoid right base angle is: 53.29 Degrees
#include <limits> #include <type_traits> #include "zserio/CppRuntimeException.h" #include "zserio/VarSizeUtil.h" namespace zserio { uint32_t convertSizeToUInt32(size_t value) { #ifdef ZSERIO_RUNTIME_64BIT if (value > static_cast<size_t>(std::numeric_limits<uint32_t>::max())) { throw CppRuntimeException("VarSizeUtil: Size value '") + value + "' is out of bounds for conversion to uint32_t type!"; } #endif return static_cast<uint32_t>(value); } } // namespace zserio